Column schema (dtype and observed min–max per column):

| column | dtype | observed range |
| --- | --- | --- |
| problem_id | string | lengths 18–22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.71k–9.01k |
| golden_diff | string | lengths 151–4.94k |
| verification_info | string | lengths 465–11.3k |
| num_tokens_prompt | int64 | 557–2.05k |
| num_tokens_diff | int64 | 48–1.02k |
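For anyone who wants to query these rows directly, here is a minimal sketch of loading the dataset from the Hub. The repository id is taken from the `source` column below; the `train` split name is an assumption, since no split is shown here:

```python
from datasets import load_dataset

# Hub id from the `source` column; the split name is assumed, not documented here
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
```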
problem_id: gh_patches_debug_32737
source: rasdani/github-patches
task_type: git_diff
in_source_id: dask__dask-586
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Additional improvement for da.image.imread I think following 2 improvements make the function better. #### 1. Allow custom `imread` function. Some prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better. #### 2. Allow custom preprocessing function. In image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility. </issue> <code> [start of dask/array/image.py] 1 from glob import glob 2 import os 3 4 try: 5 from skimage.io import imread as sk_imread 6 except ImportError: 7 pass 8 9 from .core import Array 10 from ..base import tokenize 11 12 def add_leading_dimension(x): 13 return x[None, ...] 14 15 16 def imread(filename): 17 """ Read a stack of images into a dask array 18 19 Parameters 20 ---------- 21 22 filename: string 23 A globstring like 'myfile.*.png' 24 25 Example 26 ------- 27 28 >>> from dask.array.image import imread 29 >>> im = imread('2015-*-*.png') # doctest: +SKIP 30 >>> im.shape # doctest: +SKIP 31 (365, 1000, 1000, 3) 32 33 Returns 34 ------- 35 36 Dask array of all images stacked along the first dimension. All images 37 will be treated as individual chunks 38 """ 39 filenames = sorted(glob(filename)) 40 if not filenames: 41 raise ValueError("No files found under name %s" % filename) 42 43 name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) 44 45 sample = sk_imread(filenames[0]) 46 47 dsk = dict(((name, i) + (0,) * len(sample.shape), 48 (add_leading_dimension, (sk_imread, filename))) 49 for i, filename in enumerate(filenames)) 50 51 chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) 52 53 return Array(dsk, name, chunks, sample.dtype) 54 [end of dask/array/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dask/array/image.py b/dask/array/image.py --- a/dask/array/image.py +++ b/dask/array/image.py @@ -13,7 +13,7 @@ return x[None, ...] -def imread(filename): +def imread(filename, imread=None, preprocess=None): """ Read a stack of images into a dask array Parameters @@ -21,6 +21,13 @@ filename: string A globstring like 'myfile.*.png' + imread: function (optional) + Optionally provide custom imread function. + Function should expect a filename and produce a numpy array. + Defaults to ``skimage.io.imread``. + preprocess: function (optional) + Optionally provide custom function to preprocess the image. + Function should expect a numpy array for a single image. Example ------- @@ -36,17 +43,25 @@ Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """ + imread = imread or sk_imread filenames = sorted(glob(filename)) if not filenames: raise ValueError("No files found under name %s" % filename) name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) - sample = sk_imread(filenames[0]) - - dsk = dict(((name, i) + (0,) * len(sample.shape), - (add_leading_dimension, (sk_imread, filename))) - for i, filename in enumerate(filenames)) + sample = imread(filenames[0]) + if preprocess: + sample = preprocess(sample) + + keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] + if preprocess: + values = [(add_leading_dimension, (preprocess, (imread, filename))) + for filename in filenames] + else: + values = [(add_leading_dimension, (imread, filename)) + for filename in filenames] + dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
{"golden_diff": "diff --git a/dask/array/image.py b/dask/array/image.py\n--- a/dask/array/image.py\n+++ b/dask/array/image.py\n@@ -13,7 +13,7 @@\n return x[None, ...]\n \n \n-def imread(filename):\n+def imread(filename, imread=None, preprocess=None):\n \"\"\" Read a stack of images into a dask array\n \n Parameters\n@@ -21,6 +21,13 @@\n \n filename: string\n A globstring like 'myfile.*.png'\n+ imread: function (optional)\n+ Optionally provide custom imread function.\n+ Function should expect a filename and produce a numpy array.\n+ Defaults to ``skimage.io.imread``.\n+ preprocess: function (optional)\n+ Optionally provide custom function to preprocess the image.\n+ Function should expect a numpy array for a single image.\n \n Example\n -------\n@@ -36,17 +43,25 @@\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n+ imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n \n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n \n- sample = sk_imread(filenames[0])\n-\n- dsk = dict(((name, i) + (0,) * len(sample.shape),\n- (add_leading_dimension, (sk_imread, filename)))\n- for i, filename in enumerate(filenames))\n+ sample = imread(filenames[0])\n+ if preprocess:\n+ sample = preprocess(sample)\n+\n+ keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n+ if preprocess:\n+ values = [(add_leading_dimension, (preprocess, (imread, filename)))\n+ for filename in filenames]\n+ else:\n+ values = [(add_leading_dimension, (imread, filename))\n+ for filename in filenames]\n+ dsk = dict(zip(keys, values))\n \n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n", "issue": "Additional improvement for da.image.imread\nI think following 2 improvements make the function better.\n#### 1. Allow custom `imread` function.\n\nSome prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better.\n#### 2. Allow custom preprocessing function.\n\nIn image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility.\n\n", "before_files": [{"content": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept ImportError:\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename):\n \"\"\" Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n\n Example\n -------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. 
All images\n will be treated as individual chunks\n \"\"\"\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = sk_imread(filenames[0])\n\n dsk = dict(((name, i) + (0,) * len(sample.shape),\n (add_leading_dimension, (sk_imread, filename)))\n for i, filename in enumerate(filenames))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)\n", "path": "dask/array/image.py"}]}
num_tokens_prompt: 1,120
num_tokens_diff: 498
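The golden diff above widens `imread` to accept custom `imread=` and `preprocess=` callables, which is exactly what the issue asks for. A short usage sketch, assuming the patch is applied and OpenCV is available; the `to_rgb` helper is illustrative and not part of the patch:

```python
import cv2  # any callable mapping a filename to an ndarray would work here
from dask.array.image import imread

def to_rgb(image):
    # OpenCV reads colors in BGR order; normalize to RGB before stacking
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

stack = imread('2015-*-*.png', imread=cv2.imread, preprocess=to_rgb)
```

As the issue notes, `preprocess` runs once per image and must return arrays of a single common shape, since every file becomes one chunk of the stacked array.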
problem_id: gh_patches_debug_3876
source: rasdani/github-patches
task_type: git_diff
in_source_id: xorbitsai__inference-299
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FEAT: Disable Gradio Telemetry Pull requests are disabled but see here: https://github.com/arch-btw/inference/pull/1 </issue> <code> [start of examples/gradio_chatinterface.py] 1 from typing import Dict, List 2 3 import gradio as gr 4 5 from xinference.client import Client 6 7 if __name__ == "__main__": 8 import argparse 9 import textwrap 10 11 parser = argparse.ArgumentParser( 12 formatter_class=argparse.RawDescriptionHelpFormatter, 13 epilog=textwrap.dedent( 14 """\ 15 instructions to run: 16 1. Install Xinference and Llama-cpp-python 17 2. Run 'xinference --host "localhost" --port 9997' in terminal 18 3. Run this python file in new terminal window 19 20 e.g. (feel free to copy) 21 python gradio_chatinterface.py \\ 22 --endpoint http://localhost:9997 \\ 23 --model_name vicuna-v1.3 \\ 24 --model_size_in_billions 7 \\ 25 --model_format ggmlv3 \\ 26 --quantization q2_K 27 28 If you decide to change the port number in step 2, 29 please also change the endpoint in the arguments 30 """ 31 ), 32 ) 33 34 parser.add_argument( 35 "--endpoint", type=str, required=True, help="Xinference endpoint, required" 36 ) 37 parser.add_argument( 38 "--model_name", type=str, required=True, help="Name of the model, required" 39 ) 40 parser.add_argument( 41 "--model_size_in_billions", 42 type=int, 43 required=False, 44 help="Size of the model in billions", 45 ) 46 parser.add_argument( 47 "--model_format", 48 type=str, 49 required=False, 50 help="Format of the model", 51 ) 52 parser.add_argument( 53 "--quantization", type=str, required=False, help="Quantization of the model" 54 ) 55 56 args = parser.parse_args() 57 58 endpoint = args.endpoint 59 model_name = args.model_name 60 model_size_in_billions = args.model_size_in_billions 61 model_format = args.model_format 62 quantization = args.quantization 63 64 print(f"Xinference endpoint: {endpoint}") 65 print(f"Model Name: {model_name}") 66 print(f"Model Size (in billions): {model_size_in_billions}") 67 print(f"Model Format: {model_format}") 68 print(f"Quantization: {quantization}") 69 70 client = Client(endpoint) 71 model_uid = client.launch_model( 72 model_name, 73 model_size_in_billions=model_size_in_billions, 74 model_format=model_format, 75 quantization=quantization, 76 n_ctx=2048, 77 ) 78 model = client.get_model(model_uid) 79 80 def flatten(matrix: List[List[str]]) -> List[str]: 81 flat_list = [] 82 for row in matrix: 83 flat_list += row 84 return flat_list 85 86 def to_chat(lst: List[str]) -> List[Dict[str, str]]: 87 res = [] 88 for i in range(len(lst)): 89 role = "assistant" if i % 2 == 1 else "user" 90 res.append( 91 { 92 "role": role, 93 "content": lst[i], 94 } 95 ) 96 return res 97 98 def generate_wrapper(message: str, history: List[List[str]]) -> str: 99 output = model.chat( 100 prompt=message, 101 chat_history=to_chat(flatten(history)), 102 generate_config={"max_tokens": 512, "stream": False}, 103 ) 104 return output["choices"][0]["message"]["content"] 105 106 demo = gr.ChatInterface( 107 fn=generate_wrapper, 108 examples=[ 109 "Show me a two sentence horror story with a plot twist", 110 "Generate a Haiku poem using trignometry as the central theme", 111 "Write three sentences of scholarly description regarding a supernatural beast", 112 "Prove there does not exist a largest integer", 113 ], 114 title="Xinference Chat Bot", 115 ) 116 demo.launch() 117 [end of examples/gradio_chatinterface.py] </code> I need you to solve this issue by generating a 
single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.py --- a/examples/gradio_chatinterface.py +++ b/examples/gradio_chatinterface.py @@ -105,6 +105,7 @@ demo = gr.ChatInterface( fn=generate_wrapper, + analytics_enabled=False, examples=[ "Show me a two sentence horror story with a plot twist", "Generate a Haiku poem using trignometry as the central theme",
{"golden_diff": "diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.py\n--- a/examples/gradio_chatinterface.py\n+++ b/examples/gradio_chatinterface.py\n@@ -105,6 +105,7 @@\n \n demo = gr.ChatInterface(\n fn=generate_wrapper,\n+ analytics_enabled=False,\n examples=[\n \"Show me a two sentence horror story with a plot twist\",\n \"Generate a Haiku poem using trignometry as the central theme\",\n", "issue": "FEAT: Disable Gradio Telemetry\nPull requests are disabled but see here:\r\n\r\nhttps://github.com/arch-btw/inference/pull/1\n", "before_files": [{"content": "from typing import Dict, List\n\nimport gradio as gr\n\nfrom xinference.client import Client\n\nif __name__ == \"__main__\":\n import argparse\n import textwrap\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=textwrap.dedent(\n \"\"\"\\\n instructions to run:\n 1. Install Xinference and Llama-cpp-python\n 2. Run 'xinference --host \"localhost\" --port 9997' in terminal\n 3. Run this python file in new terminal window\n\n e.g. (feel free to copy)\n python gradio_chatinterface.py \\\\\n --endpoint http://localhost:9997 \\\\\n --model_name vicuna-v1.3 \\\\\n --model_size_in_billions 7 \\\\\n --model_format ggmlv3 \\\\\n --quantization q2_K\n\n If you decide to change the port number in step 2,\n please also change the endpoint in the arguments\n \"\"\"\n ),\n )\n\n parser.add_argument(\n \"--endpoint\", type=str, required=True, help=\"Xinference endpoint, required\"\n )\n parser.add_argument(\n \"--model_name\", type=str, required=True, help=\"Name of the model, required\"\n )\n parser.add_argument(\n \"--model_size_in_billions\",\n type=int,\n required=False,\n help=\"Size of the model in billions\",\n )\n parser.add_argument(\n \"--model_format\",\n type=str,\n required=False,\n help=\"Format of the model\",\n )\n parser.add_argument(\n \"--quantization\", type=str, required=False, help=\"Quantization of the model\"\n )\n\n args = parser.parse_args()\n\n endpoint = args.endpoint\n model_name = args.model_name\n model_size_in_billions = args.model_size_in_billions\n model_format = args.model_format\n quantization = args.quantization\n\n print(f\"Xinference endpoint: {endpoint}\")\n print(f\"Model Name: {model_name}\")\n print(f\"Model Size (in billions): {model_size_in_billions}\")\n print(f\"Model Format: {model_format}\")\n print(f\"Quantization: {quantization}\")\n\n client = Client(endpoint)\n model_uid = client.launch_model(\n model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n n_ctx=2048,\n )\n model = client.get_model(model_uid)\n\n def flatten(matrix: List[List[str]]) -> List[str]:\n flat_list = []\n for row in matrix:\n flat_list += row\n return flat_list\n\n def to_chat(lst: List[str]) -> List[Dict[str, str]]:\n res = []\n for i in range(len(lst)):\n role = \"assistant\" if i % 2 == 1 else \"user\"\n res.append(\n {\n \"role\": role,\n \"content\": lst[i],\n }\n )\n return res\n\n def generate_wrapper(message: str, history: List[List[str]]) -> str:\n output = model.chat(\n prompt=message,\n chat_history=to_chat(flatten(history)),\n generate_config={\"max_tokens\": 512, \"stream\": False},\n )\n return output[\"choices\"][0][\"message\"][\"content\"]\n\n demo = gr.ChatInterface(\n fn=generate_wrapper,\n examples=[\n \"Show me a two sentence horror story with a plot twist\",\n \"Generate a Haiku poem using trignometry as the central theme\",\n \"Write three sentences of 
scholarly description regarding a supernatural beast\",\n \"Prove there does not exist a largest integer\",\n ],\n title=\"Xinference Chat Bot\",\n )\n demo.launch()\n", "path": "examples/gradio_chatinterface.py"}]}
num_tokens_prompt: 1,629
num_tokens_diff: 104
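The fix is a single keyword argument on the `gr.ChatInterface` constructor. A stripped-down sketch of the call site with the patch applied; `echo` is a placeholder for the example's `generate_wrapper`:

```python
import gradio as gr

def echo(message, history):
    return message  # stand-in chat function

demo = gr.ChatInterface(
    fn=echo,
    analytics_enabled=False,  # opt out of Gradio telemetry for this interface
    title="Xinference Chat Bot",
)
demo.launch()
```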
problem_id: gh_patches_debug_4863
source: rasdani/github-patches
task_type: git_diff
in_source_id: digitalfabrik__integreat-cms-1210
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PDF Export URL pattern ### Describe the Bug The web app calls `/REGION/LANG/wp-json/ig-mpdf/v1/pdf` to export a PDF which returns a 404. Our API currently uses `REGION/LANG/pdf`. The normal mapping does not work, as we ### Steps to Reproduce ```shell curl 'https://malte-test.tuerantuer.org/joerdenstorf/de/wp-json/ig-mpdf/v1/pdf' ``` ### Expected Behavior Map old URL pattern to new endpoint. ### Actual Behavior 404 </issue> <code> [start of integreat_cms/api/urls.py] 1 """ 2 Expansion of API-Endpoints for the CMS 3 """ 4 from django.urls import include, path, re_path 5 6 from .v3.events import events 7 from .v3.feedback import ( 8 page_feedback, 9 search_result_feedback, 10 region_feedback, 11 offer_feedback, 12 offer_list_feedback, 13 event_list_feedback, 14 event_feedback, 15 poi_feedback, 16 map_feedback, 17 imprint_page_feedback, 18 legacy_feedback_endpoint, 19 ) 20 from .v3.imprint import imprint 21 from .v3.languages import languages 22 from .v3.locations import locations 23 from .v3.pages import pages, children, parents, single_page 24 from .v3.pdf_export import pdf_export 25 from .v3.push_notifications import sent_push_notifications 26 from .v3.regions import regions, liveregions, hiddenregions 27 from .v3.offers import offers 28 29 30 #: The namespace for this URL config (see :attr:`django.urls.ResolverMatch.app_name`) 31 app_name = "api" 32 33 content_api_urlpatterns = [ 34 path("pages/", pages, name="pages"), 35 path("locations/", locations, name="locations"), 36 path("events/", events, name="events"), 37 path("page/", single_page, name="single_page"), 38 path("post/", single_page, name="single_page"), 39 path("children/", children, name="children"), 40 path("parents/", parents, name="parents"), 41 path("pdf/", pdf_export, name="pdf_export"), 42 path( 43 "sent_push_notifications/", 44 sent_push_notifications, 45 name="sent_push_notifications", 46 ), 47 path("imprint/", imprint, name="imprint"), 48 path("disclaimer/", imprint, name="imprint"), 49 path("offers/", offers, name="offers"), 50 path("extras/", offers, name="offers"), 51 re_path( 52 r"^feedback/?$", 53 legacy_feedback_endpoint.legacy_feedback_endpoint, 54 name="legacy_feedback_endpoint", 55 ), 56 path( 57 "feedback/", 58 include( 59 [ 60 re_path( 61 r"^categories/?$", 62 region_feedback.region_feedback, 63 name="region_feedback", 64 ), 65 re_path(r"^page/?$", page_feedback.page_feedback, name="page_feedback"), 66 re_path(r"^poi/?$", poi_feedback.poi_feedback, name="poi_feedback"), 67 re_path( 68 r"^event/?$", event_feedback.event_feedback, name="event_feedback" 69 ), 70 re_path( 71 r"^events/?$", 72 event_list_feedback.event_list_feedback, 73 name="event_list_feedback", 74 ), 75 re_path( 76 r"^imprint-page/?$", 77 imprint_page_feedback.imprint_page_feedback, 78 name="imprint_page_feedbacks", 79 ), 80 re_path(r"^map/?$", map_feedback.map_feedback, name="map_feedback"), 81 re_path( 82 r"^search/?$", 83 search_result_feedback.search_result_feedback, 84 name="search_result_feedback", 85 ), 86 re_path( 87 r"^offers/?$", 88 offer_list_feedback.offer_list_feedback, 89 name="offer_list_feedback", 90 ), 91 re_path( 92 r"^extras/?$", 93 offer_list_feedback.offer_list_feedback, 94 name="offer_list_feedback", 95 ), 96 re_path( 97 r"^offer/?$", offer_feedback.offer_feedback, name="offer_feedback" 98 ), 99 re_path( 100 r"^extra/?$", offer_feedback.offer_feedback, name="offer_feedback" 101 ), 102 ] 103 ), 104 ), 105 ] 106 107 
region_api_urlpatterns = [ 108 path("", regions, name="regions"), 109 path("live/", liveregions, name="regions_live"), 110 path("hidden/", hiddenregions, name="regions_hidden"), 111 ] 112 113 #: The url patterns of this module (see :doc:`topics/http/urls`) 114 urlpatterns = [ 115 path("api/regions/", include(region_api_urlpatterns)), 116 path("wp-json/extensions/v3/sites/", include(region_api_urlpatterns)), 117 path( 118 "api/<slug:region_slug>/", 119 include( 120 [ 121 path("languages/", languages, name="languages"), 122 path("offers/", offers, name="offers"), 123 path("extras/", offers, name="offers"), 124 path("<slug:language_slug>/", include(content_api_urlpatterns)), 125 ] 126 ), 127 ), 128 path( 129 "<slug:region_slug>/", 130 include( 131 [ 132 path( 133 "de/wp-json/extensions/v3/languages/", languages, name="languages" 134 ), 135 path( 136 "<slug:language_slug>/wp-json/extensions/v3/", 137 include(content_api_urlpatterns), 138 ), 139 ] 140 ), 141 ), 142 ] 143 [end of integreat_cms/api/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/integreat_cms/api/urls.py b/integreat_cms/api/urls.py --- a/integreat_cms/api/urls.py +++ b/integreat_cms/api/urls.py @@ -136,6 +136,11 @@ "<slug:language_slug>/wp-json/extensions/v3/", include(content_api_urlpatterns), ), + path( + "<slug:language_slug>/wp-json/ig-mpdf/v1/pdf/", + pdf_export, + name="pdf_export", + ), ] ), ),
{"golden_diff": "diff --git a/integreat_cms/api/urls.py b/integreat_cms/api/urls.py\n--- a/integreat_cms/api/urls.py\n+++ b/integreat_cms/api/urls.py\n@@ -136,6 +136,11 @@\n \"<slug:language_slug>/wp-json/extensions/v3/\",\n include(content_api_urlpatterns),\n ),\n+ path(\n+ \"<slug:language_slug>/wp-json/ig-mpdf/v1/pdf/\",\n+ pdf_export,\n+ name=\"pdf_export\",\n+ ),\n ]\n ),\n ),\n", "issue": "PDF Export URL pattern\n### Describe the Bug\r\nThe web app calls `/REGION/LANG/wp-json/ig-mpdf/v1/pdf` to export a PDF which returns a 404. Our API currently uses `REGION/LANG/pdf`.\r\n\r\nThe normal mapping does not work, as we\r\n\r\n### Steps to Reproduce\r\n\r\n```shell\r\ncurl 'https://malte-test.tuerantuer.org/joerdenstorf/de/wp-json/ig-mpdf/v1/pdf'\r\n```\r\n\r\n### Expected Behavior\r\nMap old URL pattern to new endpoint.\r\n\r\n\r\n### Actual Behavior\r\n404\n", "before_files": [{"content": "\"\"\"\nExpansion of API-Endpoints for the CMS\n\"\"\"\nfrom django.urls import include, path, re_path\n\nfrom .v3.events import events\nfrom .v3.feedback import (\n page_feedback,\n search_result_feedback,\n region_feedback,\n offer_feedback,\n offer_list_feedback,\n event_list_feedback,\n event_feedback,\n poi_feedback,\n map_feedback,\n imprint_page_feedback,\n legacy_feedback_endpoint,\n)\nfrom .v3.imprint import imprint\nfrom .v3.languages import languages\nfrom .v3.locations import locations\nfrom .v3.pages import pages, children, parents, single_page\nfrom .v3.pdf_export import pdf_export\nfrom .v3.push_notifications import sent_push_notifications\nfrom .v3.regions import regions, liveregions, hiddenregions\nfrom .v3.offers import offers\n\n\n#: The namespace for this URL config (see :attr:`django.urls.ResolverMatch.app_name`)\napp_name = \"api\"\n\ncontent_api_urlpatterns = [\n path(\"pages/\", pages, name=\"pages\"),\n path(\"locations/\", locations, name=\"locations\"),\n path(\"events/\", events, name=\"events\"),\n path(\"page/\", single_page, name=\"single_page\"),\n path(\"post/\", single_page, name=\"single_page\"),\n path(\"children/\", children, name=\"children\"),\n path(\"parents/\", parents, name=\"parents\"),\n path(\"pdf/\", pdf_export, name=\"pdf_export\"),\n path(\n \"sent_push_notifications/\",\n sent_push_notifications,\n name=\"sent_push_notifications\",\n ),\n path(\"imprint/\", imprint, name=\"imprint\"),\n path(\"disclaimer/\", imprint, name=\"imprint\"),\n path(\"offers/\", offers, name=\"offers\"),\n path(\"extras/\", offers, name=\"offers\"),\n re_path(\n r\"^feedback/?$\",\n legacy_feedback_endpoint.legacy_feedback_endpoint,\n name=\"legacy_feedback_endpoint\",\n ),\n path(\n \"feedback/\",\n include(\n [\n re_path(\n r\"^categories/?$\",\n region_feedback.region_feedback,\n name=\"region_feedback\",\n ),\n re_path(r\"^page/?$\", page_feedback.page_feedback, name=\"page_feedback\"),\n re_path(r\"^poi/?$\", poi_feedback.poi_feedback, name=\"poi_feedback\"),\n re_path(\n r\"^event/?$\", event_feedback.event_feedback, name=\"event_feedback\"\n ),\n re_path(\n r\"^events/?$\",\n event_list_feedback.event_list_feedback,\n name=\"event_list_feedback\",\n ),\n re_path(\n r\"^imprint-page/?$\",\n imprint_page_feedback.imprint_page_feedback,\n name=\"imprint_page_feedbacks\",\n ),\n re_path(r\"^map/?$\", map_feedback.map_feedback, name=\"map_feedback\"),\n re_path(\n r\"^search/?$\",\n search_result_feedback.search_result_feedback,\n name=\"search_result_feedback\",\n ),\n re_path(\n r\"^offers/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"offer_list_feedback\",\n 
),\n re_path(\n r\"^extras/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"offer_list_feedback\",\n ),\n re_path(\n r\"^offer/?$\", offer_feedback.offer_feedback, name=\"offer_feedback\"\n ),\n re_path(\n r\"^extra/?$\", offer_feedback.offer_feedback, name=\"offer_feedback\"\n ),\n ]\n ),\n ),\n]\n\nregion_api_urlpatterns = [\n path(\"\", regions, name=\"regions\"),\n path(\"live/\", liveregions, name=\"regions_live\"),\n path(\"hidden/\", hiddenregions, name=\"regions_hidden\"),\n]\n\n#: The url patterns of this module (see :doc:`topics/http/urls`)\nurlpatterns = [\n path(\"api/regions/\", include(region_api_urlpatterns)),\n path(\"wp-json/extensions/v3/sites/\", include(region_api_urlpatterns)),\n path(\n \"api/<slug:region_slug>/\",\n include(\n [\n path(\"languages/\", languages, name=\"languages\"),\n path(\"offers/\", offers, name=\"offers\"),\n path(\"extras/\", offers, name=\"offers\"),\n path(\"<slug:language_slug>/\", include(content_api_urlpatterns)),\n ]\n ),\n ),\n path(\n \"<slug:region_slug>/\",\n include(\n [\n path(\n \"de/wp-json/extensions/v3/languages/\", languages, name=\"languages\"\n ),\n path(\n \"<slug:language_slug>/wp-json/extensions/v3/\",\n include(content_api_urlpatterns),\n ),\n ]\n ),\n ),\n]\n", "path": "integreat_cms/api/urls.py"}]}
num_tokens_prompt: 1,935
num_tokens_diff: 130
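The patch keeps the existing `pdf_export` view and simply adds the legacy WordPress-style path in front of it. One hedged way to check the mapping, for example from a Django shell or test running with this project's settings; the region and language slugs are made up:

```python
from django.urls import resolve

match = resolve("/testregion/de/wp-json/ig-mpdf/v1/pdf/")
assert match.url_name == "pdf_export"
assert match.kwargs == {"region_slug": "testregion", "language_slug": "de"}
```

Note the trailing slash: the pattern is registered as `pdf/`, so the slash-less URL from the issue's curl example would depend on Django's `APPEND_SLASH` redirect.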
problem_id: gh_patches_debug_29434
source: rasdani/github-patches
task_type: git_diff
in_source_id: plone__Products.CMFPlone-1515
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Resources from third party add-ons are not being included in compiled plone-legacy bundle Seems JS resources registered in Plone 5 using old approach (`jsregistry.xml`) are not included in the final compilation: I installed an add-on and, even as I can see the JS resources listed in `default.js`, the source code is not present. If I enable development mode, then I can see the source code included in `plone-legacy-compiled.js` and it's executed normally. </issue> <code> [start of Products/CMFPlone/resources/browser/combine.py] 1 from zExceptions import NotFound 2 from Acquisition import aq_base 3 from datetime import datetime 4 from plone.registry.interfaces import IRegistry 5 from plone.resource.file import FilesystemFile 6 from plone.resource.interfaces import IResourceDirectory 7 from Products.CMFPlone.interfaces import IBundleRegistry 8 from Products.CMFPlone.interfaces.resources import ( 9 OVERRIDE_RESOURCE_DIRECTORY_NAME, 10 ) 11 from StringIO import StringIO 12 from zope.component import getUtility 13 from zope.component import queryUtility 14 15 PRODUCTION_RESOURCE_DIRECTORY = "production" 16 17 18 def get_production_resource_directory(): 19 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 20 if persistent_directory is None: 21 return '' 22 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 23 try: 24 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 25 except NotFound: 26 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY 27 timestamp = production_folder.readFile('timestamp.txt') 28 return "%s/++unique++%s" % ( 29 PRODUCTION_RESOURCE_DIRECTORY, timestamp) 30 31 32 def get_resource(context, path): 33 resource = context.unrestrictedTraverse(path) 34 if isinstance(resource, FilesystemFile): 35 (directory, sep, filename) = path.rpartition('/') 36 return context.unrestrictedTraverse(directory).readFile(filename) 37 else: 38 if hasattr(aq_base(resource), 'GET'): 39 # for FileResource 40 return resource.GET() 41 else: 42 # any BrowserView 43 return resource() 44 45 46 def write_js(context, folder, meta_bundle): 47 registry = getUtility(IRegistry) 48 resources = [] 49 50 # default resources 51 if meta_bundle == 'default' and registry.records.get( 52 'plone.resources/jquery.js' 53 ): 54 resources.append(get_resource(context, 55 registry.records['plone.resources/jquery.js'].value)) 56 resources.append(get_resource(context, 57 registry.records['plone.resources.requirejs'].value)) 58 resources.append(get_resource(context, 59 registry.records['plone.resources.configjs'].value)) 60 61 # bundles 62 bundles = registry.collectionOfInterface( 63 IBundleRegistry, prefix="plone.bundles", check=False) 64 for bundle in bundles.values(): 65 if bundle.merge_with == meta_bundle: 66 resources.append(get_resource(context, bundle.jscompilation)) 67 68 fi = StringIO() 69 for script in resources: 70 fi.write(script + '\n') 71 folder.writeFile(meta_bundle + ".js", fi) 72 73 74 def write_css(context, folder, meta_bundle): 75 registry = getUtility(IRegistry) 76 resources = [] 77 78 bundles = registry.collectionOfInterface( 79 IBundleRegistry, prefix="plone.bundles", check=False) 80 for bundle in bundles.values(): 81 if bundle.merge_with == meta_bundle: 82 resources.append(get_resource(context, bundle.csscompilation)) 83 84 fi = StringIO() 85 for script in resources: 86 fi.write(script + '\n') 87 folder.writeFile(meta_bundle + ".css", fi) 88 89 90 
def combine_bundles(context): 91 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 92 if persistent_directory is None: 93 return 94 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory: 95 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) 96 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 97 if PRODUCTION_RESOURCE_DIRECTORY not in container: 98 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY) 99 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 100 101 # store timestamp 102 fi = StringIO() 103 fi.write(datetime.now().isoformat()) 104 production_folder.writeFile("timestamp.txt", fi) 105 106 # generate new combined bundles 107 write_js(context, production_folder, 'default') 108 write_js(context, production_folder, 'logged-in') 109 write_css(context, production_folder, 'default') 110 write_css(context, production_folder, 'logged-in') 111 [end of Products/CMFPlone/resources/browser/combine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py --- a/Products/CMFPlone/resources/browser/combine.py +++ b/Products/CMFPlone/resources/browser/combine.py @@ -30,6 +30,14 @@ def get_resource(context, path): + if path.startswith('++plone++'): + # ++plone++ resources can be customized, we return their override + # value if any + overrides = get_override_directory(context) + filepath = path[9:] + if overrides.isFile(filepath): + return overrides.readFile(filepath) + resource = context.unrestrictedTraverse(path) if isinstance(resource, FilesystemFile): (directory, sep, filename) = path.rpartition('/') @@ -87,13 +95,17 @@ folder.writeFile(meta_bundle + ".css", fi) -def combine_bundles(context): +def get_override_directory(context): persistent_directory = queryUtility(IResourceDirectory, name="persistent") if persistent_directory is None: return if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory: persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) - container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] + return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] + + +def combine_bundles(context): + container = get_override_directory(context) if PRODUCTION_RESOURCE_DIRECTORY not in container: container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY) production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
{"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -30,6 +30,14 @@\n \n \n def get_resource(context, path):\n+ if path.startswith('++plone++'):\n+ # ++plone++ resources can be customized, we return their override\n+ # value if any\n+ overrides = get_override_directory(context)\n+ filepath = path[9:]\n+ if overrides.isFile(filepath):\n+ return overrides.readFile(filepath)\n+\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n@@ -87,13 +95,17 @@\n folder.writeFile(meta_bundle + \".css\", fi)\n \n \n-def combine_bundles(context):\n+def get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n- container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n+ return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n+\n+\n+def combine_bundles(context):\n+ container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n", "issue": "Resources from third party add-ons are not being included in compiled plone-legacy bundle\nSeems JS resources registered in Plone 5 using old approach (`jsregistry.xml`) are not included in the final compilation: I installed an add-on and, even as I can see the JS resources listed in `default.js`, the source code is not present.\n\nIf I enable development mode, then I can see the source code included in `plone-legacy-compiled.js` and it's executed normally.\n\n", "before_files": [{"content": "from zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n 
registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.csscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef combine_bundles(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]}
num_tokens_prompt: 1,666
num_tokens_diff: 339
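The heart of the fix is the new `++plone++` branch in `get_resource`, which consults the persistent override directory before falling back to normal traversal. A self-contained restatement of that lookup, with a small stub standing in for the Plone resource directory (`isFile` and `readFile` are the two methods the patch relies on):

```python
OVERRIDE_PREFIX = "++plone++"

class FakeOverrides:
    """Stub exposing the isFile/readFile surface used by the patch."""
    def __init__(self, files):
        self._files = files

    def isFile(self, path):
        return path in self._files

    def readFile(self, path):
        return self._files[path]

def resolve_override(path, overrides):
    # Mirror of the patched branch: customized ++plone++ resources are
    # served from the override directory when a file exists there.
    if path.startswith(OVERRIDE_PREFIX):
        filepath = path[len(OVERRIDE_PREFIX):]
        if overrides.isFile(filepath):
            return overrides.readFile(filepath)
    return None  # caller falls through to unrestrictedTraverse

overrides = FakeOverrides({"static/foo.js": "console.log('customized');"})
assert resolve_override("++plone++static/foo.js", overrides) == "console.log('customized');"
```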
problem_id: gh_patches_debug_22011
source: rasdani/github-patches
task_type: git_diff
in_source_id: docker__docker-py-1330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add docker network IPAM options parameter IPAM driver missing options supports an options field in the IPAM config It introduced in API v1.22. ``` POST /networks/create Now supports an options field in the IPAM config that provides options for custom IPAM plugins. ``` </issue> <code> [start of docker/types/networks.py] 1 from .. import errors 2 from ..utils import normalize_links, version_lt 3 4 5 class EndpointConfig(dict): 6 def __init__(self, version, aliases=None, links=None, ipv4_address=None, 7 ipv6_address=None, link_local_ips=None): 8 if version_lt(version, '1.22'): 9 raise errors.InvalidVersion( 10 'Endpoint config is not supported for API version < 1.22' 11 ) 12 13 if aliases: 14 self["Aliases"] = aliases 15 16 if links: 17 self["Links"] = normalize_links(links) 18 19 ipam_config = {} 20 if ipv4_address: 21 ipam_config['IPv4Address'] = ipv4_address 22 23 if ipv6_address: 24 ipam_config['IPv6Address'] = ipv6_address 25 26 if link_local_ips is not None: 27 if version_lt(version, '1.24'): 28 raise errors.InvalidVersion( 29 'link_local_ips is not supported for API version < 1.24' 30 ) 31 ipam_config['LinkLocalIPs'] = link_local_ips 32 33 if ipam_config: 34 self['IPAMConfig'] = ipam_config 35 36 37 class NetworkingConfig(dict): 38 def __init__(self, endpoints_config=None): 39 if endpoints_config: 40 self["EndpointsConfig"] = endpoints_config 41 42 43 class IPAMConfig(dict): 44 """ 45 Create an IPAM (IP Address Management) config dictionary to be used with 46 :py:meth:`~docker.api.network.NetworkApiMixin.create_network`. 47 48 Args: 49 50 driver (str): The IPAM driver to use. Defaults to ``default``. 51 pool_configs (list): A list of pool configurations 52 (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. 53 54 Example: 55 56 >>> ipam_config = docker.types.IPAMConfig(driver='default') 57 >>> network = client.create_network('network1', ipam=ipam_config) 58 59 """ 60 def __init__(self, driver='default', pool_configs=None): 61 self.update({ 62 'Driver': driver, 63 'Config': pool_configs or [] 64 }) 65 66 67 class IPAMPool(dict): 68 """ 69 Create an IPAM pool config dictionary to be added to the 70 ``pool_configs`` parameter of 71 :py:class:`~docker.types.IPAMConfig`. 72 73 Args: 74 75 subnet (str): Custom subnet for this IPAM pool using the CIDR 76 notation. Defaults to ``None``. 77 iprange (str): Custom IP range for endpoints in this IPAM pool using 78 the CIDR notation. Defaults to ``None``. 79 gateway (str): Custom IP address for the pool's gateway. 80 aux_addresses (dict): A dictionary of ``key -> ip_address`` 81 relationships specifying auxiliary addresses that need to be 82 allocated by the IPAM driver. 83 84 Example: 85 86 >>> ipam_pool = docker.types.IPAMPool( 87 subnet='124.42.0.0/16', 88 iprange='124.42.0.0/24', 89 gateway='124.42.0.254', 90 aux_addresses={ 91 'reserved1': '124.42.1.1' 92 } 93 ) 94 >>> ipam_config = docker.types.IPAMConfig( 95 pool_configs=[ipam_pool]) 96 """ 97 def __init__(self, subnet=None, iprange=None, gateway=None, 98 aux_addresses=None): 99 self.update({ 100 'Subnet': subnet, 101 'IPRange': iprange, 102 'Gateway': gateway, 103 'AuxiliaryAddresses': aux_addresses 104 }) 105 [end of docker/types/networks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/types/networks.py b/docker/types/networks.py --- a/docker/types/networks.py +++ b/docker/types/networks.py @@ -50,6 +50,8 @@ driver (str): The IPAM driver to use. Defaults to ``default``. pool_configs (list): A list of pool configurations (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. + options (dict): Driver options as a key-value dictionary. + Defaults to `None`. Example: @@ -57,12 +59,17 @@ >>> network = client.create_network('network1', ipam=ipam_config) """ - def __init__(self, driver='default', pool_configs=None): + def __init__(self, driver='default', pool_configs=None, options=None): self.update({ 'Driver': driver, 'Config': pool_configs or [] }) + if options: + if not isinstance(options, dict): + raise TypeError('IPAMConfig options must be a dictionary') + self['Options'] = options + class IPAMPool(dict): """
{"golden_diff": "diff --git a/docker/types/networks.py b/docker/types/networks.py\n--- a/docker/types/networks.py\n+++ b/docker/types/networks.py\n@@ -50,6 +50,8 @@\n driver (str): The IPAM driver to use. Defaults to ``default``.\n pool_configs (list): A list of pool configurations\n (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.\n+ options (dict): Driver options as a key-value dictionary.\n+ Defaults to `None`.\n \n Example:\n \n@@ -57,12 +59,17 @@\n >>> network = client.create_network('network1', ipam=ipam_config)\n \n \"\"\"\n- def __init__(self, driver='default', pool_configs=None):\n+ def __init__(self, driver='default', pool_configs=None, options=None):\n self.update({\n 'Driver': driver,\n 'Config': pool_configs or []\n })\n \n+ if options:\n+ if not isinstance(options, dict):\n+ raise TypeError('IPAMConfig options must be a dictionary')\n+ self['Options'] = options\n+\n \n class IPAMPool(dict):\n \"\"\"\n", "issue": "Add docker network IPAM options parameter\nIPAM driver missing options\n\nsupports an options field in the IPAM config \nIt introduced in API v1.22.\n\n```\nPOST /networks/create Now supports an options field in the IPAM config that provides options for custom IPAM plugins.\n```\n\n", "before_files": [{"content": "from .. import errors\nfrom ..utils import normalize_links, version_lt\n\n\nclass EndpointConfig(dict):\n def __init__(self, version, aliases=None, links=None, ipv4_address=None,\n ipv6_address=None, link_local_ips=None):\n if version_lt(version, '1.22'):\n raise errors.InvalidVersion(\n 'Endpoint config is not supported for API version < 1.22'\n )\n\n if aliases:\n self[\"Aliases\"] = aliases\n\n if links:\n self[\"Links\"] = normalize_links(links)\n\n ipam_config = {}\n if ipv4_address:\n ipam_config['IPv4Address'] = ipv4_address\n\n if ipv6_address:\n ipam_config['IPv6Address'] = ipv6_address\n\n if link_local_ips is not None:\n if version_lt(version, '1.24'):\n raise errors.InvalidVersion(\n 'link_local_ips is not supported for API version < 1.24'\n )\n ipam_config['LinkLocalIPs'] = link_local_ips\n\n if ipam_config:\n self['IPAMConfig'] = ipam_config\n\n\nclass NetworkingConfig(dict):\n def __init__(self, endpoints_config=None):\n if endpoints_config:\n self[\"EndpointsConfig\"] = endpoints_config\n\n\nclass IPAMConfig(dict):\n \"\"\"\n Create an IPAM (IP Address Management) config dictionary to be used with\n :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.\n\n Args:\n\n driver (str): The IPAM driver to use. Defaults to ``default``.\n pool_configs (list): A list of pool configurations\n (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.\n\n Example:\n\n >>> ipam_config = docker.types.IPAMConfig(driver='default')\n >>> network = client.create_network('network1', ipam=ipam_config)\n\n \"\"\"\n def __init__(self, driver='default', pool_configs=None):\n self.update({\n 'Driver': driver,\n 'Config': pool_configs or []\n })\n\n\nclass IPAMPool(dict):\n \"\"\"\n Create an IPAM pool config dictionary to be added to the\n ``pool_configs`` parameter of\n :py:class:`~docker.types.IPAMConfig`.\n\n Args:\n\n subnet (str): Custom subnet for this IPAM pool using the CIDR\n notation. Defaults to ``None``.\n iprange (str): Custom IP range for endpoints in this IPAM pool using\n the CIDR notation. 
Defaults to ``None``.\n gateway (str): Custom IP address for the pool's gateway.\n aux_addresses (dict): A dictionary of ``key -> ip_address``\n relationships specifying auxiliary addresses that need to be\n allocated by the IPAM driver.\n\n Example:\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='124.42.0.0/16',\n iprange='124.42.0.0/24',\n gateway='124.42.0.254',\n aux_addresses={\n 'reserved1': '124.42.1.1'\n }\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool])\n \"\"\"\n def __init__(self, subnet=None, iprange=None, gateway=None,\n aux_addresses=None):\n self.update({\n 'Subnet': subnet,\n 'IPRange': iprange,\n 'Gateway': gateway,\n 'AuxiliaryAddresses': aux_addresses\n })\n", "path": "docker/types/networks.py"}]}
num_tokens_prompt: 1,579
num_tokens_diff: 255
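With the patch applied, `IPAMConfig` forwards a driver-options dictionary, matching the API v1.22 field quoted in the issue. A usage sketch assuming the pre-2.0 `docker.Client` entry point that this codebase shipped with; the option key is a plugin-specific placeholder:

```python
from docker import Client  # pre-2.0 client, contemporary with this patch
from docker.types import IPAMConfig, IPAMPool

ipam = IPAMConfig(
    driver="default",
    pool_configs=[IPAMPool(subnet="10.10.0.0/16")],
    options={"com.example.ipam.mode": "strict"},  # passed through to the IPAM plugin
)

client = Client()
client.create_network("network1", driver="bridge", ipam=ipam)
```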
problem_id: gh_patches_debug_2452
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyinstaller__pyinstaller-2225
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> missing hidden import for skimage When packaging an application that imports skimage.feature (and nothing else), the app would not run due to an ImportError on the "transform" module. This can be fixed by adding one item to the hiddenimports in hook-skimage.transform.py file (bolded below): > hiddenimports = ['skimage.draw.draw', > 'skimage._shared.geometry', > 'skimage.filters.rank.core_cy', > **'skimage._shared.transform'**] > > datas = collect_data_files('skimage') PyInstaller 3.2, Windows 7 64 bit, Python 2.7.12, Anaconda 4.1.1 distribution. </issue> <code> [start of PyInstaller/hooks/hook-skimage.transform.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2014-2016, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 from PyInstaller.utils.hooks import collect_data_files 10 11 # Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7 12 # 64-bit 13 hiddenimports = ['skimage.draw.draw', 14 'skimage._shared.geometry', 15 'skimage.filters.rank.core_cy'] 16 17 datas = collect_data_files('skimage') 18 [end of PyInstaller/hooks/hook-skimage.transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-skimage.transform.py b/PyInstaller/hooks/hook-skimage.transform.py --- a/PyInstaller/hooks/hook-skimage.transform.py +++ b/PyInstaller/hooks/hook-skimage.transform.py @@ -12,6 +12,7 @@ # 64-bit hiddenimports = ['skimage.draw.draw', 'skimage._shared.geometry', + 'skimage._shared.transform', 'skimage.filters.rank.core_cy'] datas = collect_data_files('skimage')
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-skimage.transform.py b/PyInstaller/hooks/hook-skimage.transform.py\n--- a/PyInstaller/hooks/hook-skimage.transform.py\n+++ b/PyInstaller/hooks/hook-skimage.transform.py\n@@ -12,6 +12,7 @@\n # 64-bit\n hiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n+ 'skimage._shared.transform',\n 'skimage.filters.rank.core_cy']\n \n datas = collect_data_files('skimage')\n", "issue": "missing hidden import for skimage\nWhen packaging an application that imports skimage.feature (and nothing else), the app would not run due to an ImportError on the \"transform\" module. This can be fixed by adding one item to the hiddenimports in hook-skimage.transform.py file (bolded below):\n\n> hiddenimports = ['skimage.draw.draw',\n> 'skimage._shared.geometry',\n> 'skimage.filters.rank.core_cy',\n> **'skimage._shared.transform'**] \n> \n> datas = collect_data_files('skimage')\n\nPyInstaller 3.2, Windows 7 64 bit, Python 2.7.12, Anaconda 4.1.1 distribution.\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2014-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_data_files\n\n# Hook tested with scikit-image (skimage) 0.9.3 on Mac OS 10.9 and Windows 7\n# 64-bit\nhiddenimports = ['skimage.draw.draw',\n 'skimage._shared.geometry',\n 'skimage.filters.rank.core_cy']\n\ndatas = collect_data_files('skimage')\n", "path": "PyInstaller/hooks/hook-skimage.transform.py"}]}
num_tokens_prompt: 869
num_tokens_diff: 117
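Until a release ships the patched hook, the same missing module can be forced in from a project's own build configuration. A hypothetical spec-file excerpt (the script name and layout are made up), which would be run with `pyinstaller myapp.spec`:

```python
# myapp.spec (hypothetical excerpt)
a = Analysis(
    ['myapp.py'],
    hiddenimports=['skimage._shared.transform'],  # module named in the patched hook
)
```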
problem_id: gh_patches_debug_7034
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws__aws-cli-5019
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for PyYAML 5.3 Closes: https://github.com/aws/aws-cli/issues/4828 Signed-off-by: Igor Raits <[email protected]> *Issue #, if available:* *Description of changes:* By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 import os.path 4 import re 5 import sys 6 7 from setuptools import setup, find_packages 8 9 10 here = os.path.abspath(os.path.dirname(__file__)) 11 12 13 def read(*parts): 14 return codecs.open(os.path.join(here, *parts), 'r').read() 15 16 17 def find_version(*file_paths): 18 version_file = read(*file_paths) 19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", 20 version_file, re.M) 21 if version_match: 22 return version_match.group(1) 23 raise RuntimeError("Unable to find version string.") 24 25 26 install_requires = [ 27 'botocore==1.15.10', 28 'docutils>=0.10,<0.16', 29 'rsa>=3.1.2,<=3.5.0', 30 's3transfer>=0.3.0,<0.4.0', 31 'PyYAML>=3.10,<5.3', 32 ] 33 34 35 if sys.version_info[:2] == (3, 4): 36 install_requires.append('colorama>=0.2.5,<0.4.2') 37 else: 38 install_requires.append('colorama>=0.2.5,<0.4.4') 39 40 41 setup_options = dict( 42 name='awscli', 43 version=find_version("awscli", "__init__.py"), 44 description='Universal Command Line Environment for AWS.', 45 long_description=read('README.rst'), 46 author='Amazon Web Services', 47 url='http://aws.amazon.com/cli/', 48 scripts=['bin/aws', 'bin/aws.cmd', 49 'bin/aws_completer', 'bin/aws_zsh_completer.sh', 50 'bin/aws_bash_completer'], 51 packages=find_packages(exclude=['tests*']), 52 package_data={'awscli': ['data/*.json', 'examples/*/*.rst', 53 'examples/*/*.txt', 'examples/*/*/*.txt', 54 'examples/*/*/*.rst', 'topics/*.rst', 55 'topics/*.json']}, 56 install_requires=install_requires, 57 extras_require={}, 58 license="Apache License 2.0", 59 classifiers=[ 60 'Development Status :: 5 - Production/Stable', 61 'Intended Audience :: Developers', 62 'Intended Audience :: System Administrators', 63 'Natural Language :: English', 64 'License :: OSI Approved :: Apache Software License', 65 'Programming Language :: Python', 66 'Programming Language :: Python :: 2', 67 'Programming Language :: Python :: 2.7', 68 'Programming Language :: Python :: 3', 69 'Programming Language :: Python :: 3.4', 70 'Programming Language :: Python :: 3.5', 71 'Programming Language :: Python :: 3.6', 72 'Programming Language :: Python :: 3.7', 73 'Programming Language :: Python :: 3.8', 74 ], 75 ) 76 77 78 if 'py2exe' in sys.argv: 79 # This will actually give us a py2exe command. 80 import py2exe 81 # And we have some py2exe specific options. 82 setup_options['options'] = { 83 'py2exe': { 84 'optimize': 0, 85 'skip_archive': True, 86 'dll_excludes': ['crypt32.dll'], 87 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser', 88 'awscli', 'ConfigParser', 'xml.etree', 'pipes'], 89 } 90 } 91 setup_options['console'] = ['bin/aws'] 92 93 94 setup(**setup_options) 95 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,13 +28,14 @@ 'docutils>=0.10,<0.16', 'rsa>=3.1.2,<=3.5.0', 's3transfer>=0.3.0,<0.4.0', - 'PyYAML>=3.10,<5.3', ] if sys.version_info[:2] == (3, 4): + install_requires.append('PyYAML>=3.10,<5.3') install_requires.append('colorama>=0.2.5,<0.4.2') else: + install_requires.append('PyYAML>=3.10,<5.4') install_requires.append('colorama>=0.2.5,<0.4.4')
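The diff above resolves the issue by splitting the PyYAML requirement per interpreter version instead of capping it globally. A minimal sketch of that pattern in isolation (editor's illustration; only the two packages from the diff appear, not aws-cli's full dependency list):

import sys

install_requires = ["docutils>=0.10,<0.16"]

if sys.version_info[:2] == (3, 4):
    # Python 3.4 keeps the stricter cap (newer PyYAML no longer supports 3.4).
    install_requires.append("PyYAML>=3.10,<5.3")
else:
    # Everything newer may take PyYAML 5.3.
    install_requires.append("PyYAML>=3.10,<5.4")

print(install_requires)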
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,13 +28,14 @@\n 'docutils>=0.10,<0.16',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.3.0,<0.4.0',\n- 'PyYAML>=3.10,<5.3',\n ]\n \n \n if sys.version_info[:2] == (3, 4):\n+ install_requires.append('PyYAML>=3.10,<5.3')\n install_requires.append('colorama>=0.2.5,<0.4.2')\n else:\n+ install_requires.append('PyYAML>=3.10,<5.4')\n install_requires.append('colorama>=0.2.5,<0.4.4')\n", "issue": "Add support for PyYAML 5.3\nCloses: https://github.com/aws/aws-cli/issues/4828\r\nSigned-off-by: Igor Raits <[email protected]>\r\n\r\n*Issue #, if available:*\r\n\r\n*Description of changes:*\r\n\r\n\r\nBy submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ninstall_requires = [\n 'botocore==1.15.10',\n 'docutils>=0.10,<0.16',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.3.0,<0.4.0',\n 'PyYAML>=3.10,<5.3',\n]\n\n\nif sys.version_info[:2] == (3, 4):\n install_requires.append('colorama>=0.2.5,<0.4.2')\nelse:\n install_requires.append('colorama>=0.2.5,<0.4.4')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=install_requires,\n extras_require={},\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,562
num_tokens_diff: 197

problem_id: gh_patches_debug_19776
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-1484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Local runner should write makefile to temporary dir Instead, it writes it to the `root_uri` which might be an S3 URI, and `make`, which is used by the local runner cannot handle that. Makefile error when `root_uri` is an S3 path ## 🐛 Bug When running training command and having `root_uri` set to an S3 folder, this error shows up: ``` make: s3://<random_bucket_name>/predictions/Makefile: No such file or directory make: *** No rule to make target 's3://<random_bucket_name>/predictions/Makefile'. Stop. ``` This error disappears when `root_uri` is a local path. AWS config is right as it is able to read and write the files. ## To Reproduce Steps to reproduce the behavior: 1. I ran the following command inside the container: `python -m rastervision.pipeline.cli run local code/local_exp.py -a raw_uri s3://<random_bucket_name>/datafortesting/data/ -a root_uri s3://<random_bucket_name>/predictions -a test False` <!-- Please provide the command executed, source of the get_config() function, error messages, and/or full stack traces if at all possible --> ## Expected behavior It should run normally like it is running when `root_uri` is a local path. ## Environment Running with docker. **Image**: quay.io/azavea/raster-vision:pytorch-v0.13.1 ## Additional context This might be a relevant issue: #991 </issue> <code> [start of rastervision_pipeline/rastervision/pipeline/runner/local_runner.py] 1 import sys 2 from os.path import dirname, join 3 from subprocess import Popen 4 5 from rastervision.pipeline.file_system import str_to_file 6 from rastervision.pipeline.runner.runner import Runner 7 from rastervision.pipeline.utils import terminate_at_exit 8 9 LOCAL = 'local' 10 11 12 class LocalRunner(Runner): 13 """Runs each command locally using different processes for each command/split. 14 15 This is implemented by generating a Makefile and then running it using make. 
16 """ 17 18 def run(self, 19 cfg_json_uri, 20 pipeline, 21 commands, 22 num_splits=1, 23 pipeline_run_name: str = 'raster-vision'): 24 num_commands = 0 25 for command in commands: 26 if command in pipeline.split_commands and num_splits > 1: 27 num_commands += num_splits 28 else: 29 num_commands += 1 30 31 makefile = '.PHONY: ' 32 makefile += ' '.join([str(ci) for ci in range(num_commands)]) 33 makefile += '\n\n' 34 35 makefile += 'all: ' 36 makefile += ' '.join([str(ci) for ci in range(num_commands)]) 37 makefile += '\n\n' 38 39 prev_command_inds = [] 40 curr_command_ind = 0 41 for command in commands: 42 43 curr_command_inds = [] 44 if command in pipeline.split_commands and num_splits > 1: 45 for split_ind in range(num_splits): 46 makefile += '{}: '.format(curr_command_ind) 47 makefile += ' '.join([str(ci) for ci in prev_command_inds]) 48 makefile += '\n' 49 invocation = ( 50 'python -m rastervision.pipeline.cli run_command ' 51 '{} {} --split-ind {} --num-splits {}').format( 52 cfg_json_uri, command, split_ind, num_splits) 53 makefile += '\t{}\n\n'.format(invocation) 54 curr_command_inds.append(curr_command_ind) 55 curr_command_ind += 1 56 else: 57 makefile += '{}: '.format(curr_command_ind) 58 makefile += ' '.join([str(ci) for ci in prev_command_inds]) 59 makefile += '\n' 60 invocation = ( 61 'python -m rastervision.pipeline.cli run_command ' 62 '{} {}'.format(cfg_json_uri, command)) 63 makefile += '\t{}\n\n'.format(invocation) 64 curr_command_inds.append(curr_command_ind) 65 curr_command_ind += 1 66 67 prev_command_inds = curr_command_inds 68 69 makefile_path = join(dirname(cfg_json_uri), 'Makefile') 70 str_to_file(makefile, makefile_path) 71 process = Popen(['make', '-j', '-f', makefile_path]) 72 terminate_at_exit(process) 73 exitcode = process.wait() 74 if exitcode != 0: 75 sys.exit(exitcode) 76 else: 77 return 0 78 [end of rastervision_pipeline/rastervision/pipeline/runner/local_runner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py --- a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py +++ b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py @@ -2,7 +2,7 @@ from os.path import dirname, join from subprocess import Popen -from rastervision.pipeline.file_system import str_to_file +from rastervision.pipeline.file_system import str_to_file, download_if_needed from rastervision.pipeline.runner.runner import Runner from rastervision.pipeline.utils import terminate_at_exit @@ -68,7 +68,8 @@ makefile_path = join(dirname(cfg_json_uri), 'Makefile') str_to_file(makefile, makefile_path) - process = Popen(['make', '-j', '-f', makefile_path]) + makefile_path_local = download_if_needed(makefile_path) + process = Popen(['make', '-j', '-f', makefile_path_local]) terminate_at_exit(process) exitcode = process.wait() if exitcode != 0:
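The fix above downloads the Makefile before invoking make, since make can only read local filesystem paths. A standard-library sketch of the same materialize-then-run idea (editor's illustration: the helper name is invented, and a real s3:// URI would need a client such as boto3 rather than urllib; Raster Vision's own download_if_needed handles that case):

import shutil
import tempfile
import urllib.request
from urllib.parse import urlparse

def materialize_locally(uri: str) -> str:
    """Return a local path for `uri`, downloading it first if it is remote."""
    if urlparse(uri).scheme in ("", "file"):
        return uri  # already on local disk; make can read it directly
    local = tempfile.NamedTemporaryFile(delete=False, suffix=".Makefile")
    with urllib.request.urlopen(uri) as resp:  # http(s) only in this sketch
        shutil.copyfileobj(resp, local)
    local.close()
    return local.name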
{"golden_diff": "diff --git a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n--- a/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n+++ b/rastervision_pipeline/rastervision/pipeline/runner/local_runner.py\n@@ -2,7 +2,7 @@\n from os.path import dirname, join\n from subprocess import Popen\n \n-from rastervision.pipeline.file_system import str_to_file\n+from rastervision.pipeline.file_system import str_to_file, download_if_needed\n from rastervision.pipeline.runner.runner import Runner\n from rastervision.pipeline.utils import terminate_at_exit\n \n@@ -68,7 +68,8 @@\n \n makefile_path = join(dirname(cfg_json_uri), 'Makefile')\n str_to_file(makefile, makefile_path)\n- process = Popen(['make', '-j', '-f', makefile_path])\n+ makefile_path_local = download_if_needed(makefile_path)\n+ process = Popen(['make', '-j', '-f', makefile_path_local])\n terminate_at_exit(process)\n exitcode = process.wait()\n if exitcode != 0:\n", "issue": "Local runner should write makefile to temporary dir\nInstead, it writes it to the `root_uri` which might be an S3 URI, and `make`, which is used by the local runner cannot handle that.\nMakefile error when `root_uri` is an S3 path\n## \ud83d\udc1b Bug\r\nWhen running training command and having `root_uri` set to an S3 folder, this error shows up:\r\n```\r\nmake: s3://<random_bucket_name>/predictions/Makefile: No such file or directory\r\nmake: *** No rule to make target 's3://<random_bucket_name>/predictions/Makefile'. Stop.\r\n```\r\n\r\nThis error disappears when `root_uri` is a local path. AWS config is right as it is able to read and write the files.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. I ran the following command inside the container:\r\n`python -m rastervision.pipeline.cli run local code/local_exp.py -a raw_uri s3://<random_bucket_name>/datafortesting/data/ -a root_uri s3://<random_bucket_name>/predictions -a test False`\r\n\r\n<!-- Please provide the command executed, source of the get_config() function, error messages, and/or full stack traces if at all possible -->\r\n\r\n## Expected behavior\r\n\r\nIt should run normally like it is running when `root_uri` is a local path.\r\n\r\n## Environment\r\n\r\nRunning with docker. 
**Image**: quay.io/azavea/raster-vision:pytorch-v0.13.1\r\n\r\n## Additional context\r\n\r\nThis might be a relevant issue: #991 \r\n\n", "before_files": [{"content": "import sys\nfrom os.path import dirname, join\nfrom subprocess import Popen\n\nfrom rastervision.pipeline.file_system import str_to_file\nfrom rastervision.pipeline.runner.runner import Runner\nfrom rastervision.pipeline.utils import terminate_at_exit\n\nLOCAL = 'local'\n\n\nclass LocalRunner(Runner):\n \"\"\"Runs each command locally using different processes for each command/split.\n\n This is implemented by generating a Makefile and then running it using make.\n \"\"\"\n\n def run(self,\n cfg_json_uri,\n pipeline,\n commands,\n num_splits=1,\n pipeline_run_name: str = 'raster-vision'):\n num_commands = 0\n for command in commands:\n if command in pipeline.split_commands and num_splits > 1:\n num_commands += num_splits\n else:\n num_commands += 1\n\n makefile = '.PHONY: '\n makefile += ' '.join([str(ci) for ci in range(num_commands)])\n makefile += '\\n\\n'\n\n makefile += 'all: '\n makefile += ' '.join([str(ci) for ci in range(num_commands)])\n makefile += '\\n\\n'\n\n prev_command_inds = []\n curr_command_ind = 0\n for command in commands:\n\n curr_command_inds = []\n if command in pipeline.split_commands and num_splits > 1:\n for split_ind in range(num_splits):\n makefile += '{}: '.format(curr_command_ind)\n makefile += ' '.join([str(ci) for ci in prev_command_inds])\n makefile += '\\n'\n invocation = (\n 'python -m rastervision.pipeline.cli run_command '\n '{} {} --split-ind {} --num-splits {}').format(\n cfg_json_uri, command, split_ind, num_splits)\n makefile += '\\t{}\\n\\n'.format(invocation)\n curr_command_inds.append(curr_command_ind)\n curr_command_ind += 1\n else:\n makefile += '{}: '.format(curr_command_ind)\n makefile += ' '.join([str(ci) for ci in prev_command_inds])\n makefile += '\\n'\n invocation = (\n 'python -m rastervision.pipeline.cli run_command '\n '{} {}'.format(cfg_json_uri, command))\n makefile += '\\t{}\\n\\n'.format(invocation)\n curr_command_inds.append(curr_command_ind)\n curr_command_ind += 1\n\n prev_command_inds = curr_command_inds\n\n makefile_path = join(dirname(cfg_json_uri), 'Makefile')\n str_to_file(makefile, makefile_path)\n process = Popen(['make', '-j', '-f', makefile_path])\n terminate_at_exit(process)\n exitcode = process.wait()\n if exitcode != 0:\n sys.exit(exitcode)\n else:\n return 0\n", "path": "rastervision_pipeline/rastervision/pipeline/runner/local_runner.py"}]}
num_tokens_prompt: 1,651
num_tokens_diff: 268

problem_id: gh_patches_debug_26415
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-9163
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update dataset for parallel coords plot examples The example currently does this: ``` df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") ``` Which is not really great. We should add a data set to `sampledata` and use that. </issue> <code> [start of examples/custom/parallel_plot/parallel_plot.py] 1 import numpy as np 2 import pandas as pd 3 4 from bokeh.plotting import figure 5 from bokeh.layouts import column 6 from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis, 7 LinearColorMapper, MultiLine, 8 FixedTicker, BasicTickFormatter, FuncTickFormatter) 9 10 from parallel_selection_tool import ParallelSelectionTool 11 from parallel_reset import ParallelResetTool 12 13 14 def parallel_plot(df, color=None, palette=None): 15 """From a dataframe create a parallel coordinate plot 16 """ 17 npts = df.shape[0] 18 ndims = len(df.columns) 19 20 if color is None: 21 color = np.ones(npts) 22 if palette is None: 23 palette = ['#ff0000'] 24 25 cmap = LinearColorMapper(high=color.min(), 26 low=color.max(), 27 palette=palette) 28 29 data_source = ColumnDataSource(dict( 30 xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(), 31 ys=np.array((df-df.min())/(df.max()-df.min())).tolist(), 32 color=color)) 33 34 p = figure(x_range=(-1, ndims), 35 y_range=(0, 1), 36 width=1000, 37 tools="pan, box_zoom") 38 39 # Create x axis ticks from columns contained in dataframe 40 fixed_x_ticks = FixedTicker( 41 ticks=np.arange(ndims), minor_ticks=[]) 42 formatter_x_ticks = FuncTickFormatter( 43 code="return columns[index]", args={"columns": df.columns}) 44 p.xaxis.ticker = fixed_x_ticks 45 p.xaxis.formatter = formatter_x_ticks 46 47 p.yaxis.visible = False 48 p.y_range.start = 0 49 p.y_range.end = 1 50 p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis 51 p.xgrid.visible = False 52 p.ygrid.visible = False 53 54 # Create extra y axis for each dataframe column 55 tickformatter = BasicTickFormatter(precision=1) 56 for index, col in enumerate(df.columns): 57 start = df[col].min() 58 end = df[col].max() 59 bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start) 60 bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end) 61 p.extra_y_ranges.update( 62 {col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))}) 63 64 fixedticks = FixedTicker( 65 ticks=np.linspace(start, end, 8), minor_ticks=[]) 66 67 p.add_layout(LinearAxis(fixed_location=index, y_range_name=col, 68 ticker=fixedticks, formatter=tickformatter), 'right') 69 70 # create the data renderer ( MultiLine ) 71 # specify selected and non selected style 72 non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5) 73 74 selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1) 75 76 parallel_renderer = p.multi_line( 77 xs="xs", ys="ys", source=data_source, **non_selected_line_style) 78 79 # Specify selection style 80 selected_lines = MultiLine(**selected_line_style) 81 82 # Specify non selection style 83 nonselected_lines = MultiLine(**non_selected_line_style) 84 85 parallel_renderer.selection_glyph = selected_lines 86 parallel_renderer.nonselection_glyph = nonselected_lines 87 p.y_range.start = p.y_range.bounds[0] 88 p.y_range.end = p.y_range.bounds[1] 89 90 rect_source = ColumnDataSource({ 91 'x': [], 'y': [], 'width': [], 'height': [] 92 }) 93 94 # add rectangle selections 
95 selection_renderer = p.rect(x='x', y='y', width='width', height='height', 96 source=rect_source, 97 fill_alpha=0.7, fill_color='#009933') 98 selection_tool = ParallelSelectionTool( 99 renderer_select=selection_renderer, renderer_data=parallel_renderer, 100 box_width=10) 101 # custom resets (reset only axes not selections) 102 reset_axes = ParallelResetTool() 103 104 # add tools and activate selection ones 105 p.add_tools(selection_tool, reset_axes) 106 p.toolbar.active_drag = selection_tool 107 return p 108 109 if __name__ == '__main__': 110 from bokeh.palettes import Viridis256 111 from bokeh.io import show 112 df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") 113 p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256) 114 div = Div(text="Select up and down column grid lines to define filters. Double click a filter to reset it.") 115 show(column(div, p)) 116 [end of examples/custom/parallel_plot/parallel_plot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/custom/parallel_plot/parallel_plot.py b/examples/custom/parallel_plot/parallel_plot.py --- a/examples/custom/parallel_plot/parallel_plot.py +++ b/examples/custom/parallel_plot/parallel_plot.py @@ -1,11 +1,11 @@ import numpy as np -import pandas as pd from bokeh.plotting import figure from bokeh.layouts import column from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis, LinearColorMapper, MultiLine, FixedTicker, BasicTickFormatter, FuncTickFormatter) +from bokeh.sampledata.autompg import autompg_clean as df from parallel_selection_tool import ParallelSelectionTool from parallel_reset import ParallelResetTool @@ -109,7 +109,9 @@ if __name__ == '__main__': from bokeh.palettes import Viridis256 from bokeh.io import show - df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv") + del df['origin'] + del df['mfr'] + del df['name'] p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256) div = Div(text="Select up and down column grid lines to define filters. Double click a filter to reset it.") show(column(div, p))
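After the change above, the example depends only on Bokeh's bundled sample data rather than a remote CSV. A short sketch of preparing that frame the way the patched __main__ block does (editor's illustration; assumes the sample data has been fetched once via bokeh.sampledata.download()):

from bokeh.sampledata.autompg import autompg_clean as df

# Drop the non-numeric columns, mirroring the three `del` statements in the
# diff, so every remaining column can be normalized onto a parallel axis.
numeric_df = df.drop(columns=["origin", "mfr", "name"])
print(numeric_df.dtypes)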
{"golden_diff": "diff --git a/examples/custom/parallel_plot/parallel_plot.py b/examples/custom/parallel_plot/parallel_plot.py\n--- a/examples/custom/parallel_plot/parallel_plot.py\n+++ b/examples/custom/parallel_plot/parallel_plot.py\n@@ -1,11 +1,11 @@\n import numpy as np\n-import pandas as pd\n \n from bokeh.plotting import figure\n from bokeh.layouts import column\n from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis,\n LinearColorMapper, MultiLine,\n FixedTicker, BasicTickFormatter, FuncTickFormatter)\n+from bokeh.sampledata.autompg import autompg_clean as df\n \n from parallel_selection_tool import ParallelSelectionTool\n from parallel_reset import ParallelResetTool\n@@ -109,7 +109,9 @@\n if __name__ == '__main__':\n from bokeh.palettes import Viridis256\n from bokeh.io import show\n- df = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\n+ del df['origin']\n+ del df['mfr']\n+ del df['name']\n p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256)\n div = Div(text=\"Select up and down column grid lines to define filters. Double click a filter to reset it.\")\n show(column(div, p))\n", "issue": "Update dataset for parallel coords plot examples\nThe example currently does this:\r\n```\r\ndf = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\r\n```\r\n\r\nWhich is not really great. We should add a data set to `sampledata` and use that. \r\n\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\n\nfrom bokeh.plotting import figure\nfrom bokeh.layouts import column\nfrom bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis,\n LinearColorMapper, MultiLine,\n FixedTicker, BasicTickFormatter, FuncTickFormatter)\n\nfrom parallel_selection_tool import ParallelSelectionTool\nfrom parallel_reset import ParallelResetTool\n\n\ndef parallel_plot(df, color=None, palette=None):\n \"\"\"From a dataframe create a parallel coordinate plot\n \"\"\"\n npts = df.shape[0]\n ndims = len(df.columns)\n\n if color is None:\n color = np.ones(npts)\n if palette is None:\n palette = ['#ff0000']\n\n cmap = LinearColorMapper(high=color.min(),\n low=color.max(),\n palette=palette)\n\n data_source = ColumnDataSource(dict(\n xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(),\n ys=np.array((df-df.min())/(df.max()-df.min())).tolist(),\n color=color))\n\n p = figure(x_range=(-1, ndims),\n y_range=(0, 1),\n width=1000,\n tools=\"pan, box_zoom\")\n\n # Create x axis ticks from columns contained in dataframe\n fixed_x_ticks = FixedTicker(\n ticks=np.arange(ndims), minor_ticks=[])\n formatter_x_ticks = FuncTickFormatter(\n code=\"return columns[index]\", args={\"columns\": df.columns})\n p.xaxis.ticker = fixed_x_ticks\n p.xaxis.formatter = formatter_x_ticks\n\n p.yaxis.visible = False\n p.y_range.start = 0\n p.y_range.end = 1\n p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis\n p.xgrid.visible = False\n p.ygrid.visible = False\n\n # Create extra y axis for each dataframe column\n tickformatter = BasicTickFormatter(precision=1)\n for index, col in enumerate(df.columns):\n start = df[col].min()\n end = df[col].max()\n bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start)\n bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end)\n p.extra_y_ranges.update(\n {col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))})\n\n fixedticks = FixedTicker(\n ticks=np.linspace(start, end, 8), 
minor_ticks=[])\n\n p.add_layout(LinearAxis(fixed_location=index, y_range_name=col,\n ticker=fixedticks, formatter=tickformatter), 'right')\n\n # create the data renderer ( MultiLine )\n # specify selected and non selected style\n non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5)\n\n selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1)\n\n parallel_renderer = p.multi_line(\n xs=\"xs\", ys=\"ys\", source=data_source, **non_selected_line_style)\n\n # Specify selection style\n selected_lines = MultiLine(**selected_line_style)\n\n # Specify non selection style\n nonselected_lines = MultiLine(**non_selected_line_style)\n\n parallel_renderer.selection_glyph = selected_lines\n parallel_renderer.nonselection_glyph = nonselected_lines\n p.y_range.start = p.y_range.bounds[0]\n p.y_range.end = p.y_range.bounds[1]\n\n rect_source = ColumnDataSource({\n 'x': [], 'y': [], 'width': [], 'height': []\n })\n\n # add rectangle selections\n selection_renderer = p.rect(x='x', y='y', width='width', height='height',\n source=rect_source,\n fill_alpha=0.7, fill_color='#009933')\n selection_tool = ParallelSelectionTool(\n renderer_select=selection_renderer, renderer_data=parallel_renderer,\n box_width=10)\n # custom resets (reset only axes not selections)\n reset_axes = ParallelResetTool()\n\n # add tools and activate selection ones\n p.add_tools(selection_tool, reset_axes)\n p.toolbar.active_drag = selection_tool\n return p\n\nif __name__ == '__main__':\n from bokeh.palettes import Viridis256\n from bokeh.io import show\n df = pd.read_csv(\"https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv\")\n p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256)\n div = Div(text=\"Select up and down column grid lines to define filters. Double click a filter to reset it.\")\n show(column(div, p))\n", "path": "examples/custom/parallel_plot/parallel_plot.py"}]}
num_tokens_prompt: 1,905
num_tokens_diff: 303

problem_id: gh_patches_debug_5990
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__python-bigquery-672
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError in `resource_name_to_date()` samples fixture See this samples [test run](https://source.cloud.google.com/results/invocations/e5c424d5-84a6-4505-ae44-3bc70fa94e44/targets/cloud-devrel%2Fclient-libraries%2Fpython%2Fgoogleapis%2Fpython-bigquery%2Fsamples%2Fpython3.6%2Fpresubmit/log). A bug in [this fixture](https://github.com/googleapis/python-bigquery/blob/bd7dbdae5c972b16bafc53c67911eeaa3255a880/samples/snippets/conftest.py#L33-L36) made all of them fail. > AttributeError: module 'datetime' has no attribute 'strptime' I wonder how this got in in the first place? </issue> <code> [start of samples/snippets/conftest.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import datetime 16 import random 17 18 from google.cloud import bigquery 19 import pytest 20 21 22 RESOURCE_PREFIX = "python_bigquery_samples_snippets" 23 RESOURCE_DATE_FORMAT = "%Y%m%d_%H%M%S" 24 RESOURCE_DATE_LENGTH = 4 + 2 + 2 + 1 + 2 + 2 + 2 25 26 27 def resource_prefix() -> str: 28 timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT) 29 random_string = hex(random.randrange(1000000))[2:] 30 return f"{RESOURCE_PREFIX}_{timestamp}_{random_string}" 31 32 33 def resource_name_to_date(resource_name: str): 34 start_date = len(RESOURCE_PREFIX) + 1 35 date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH] 36 return datetime.strptime(date_string, RESOURCE_DATE_FORMAT) 37 38 39 @pytest.fixture(scope="session", autouse=True) 40 def cleanup_datasets(bigquery_client: bigquery.Client): 41 yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1) 42 for dataset in bigquery_client.list_datasets(): 43 if ( 44 dataset.dataset_id.startswith(RESOURCE_PREFIX) 45 and resource_name_to_date(dataset.dataset_id) < yesterday 46 ): 47 bigquery_client.delete_dataset( 48 dataset, delete_contents=True, not_found_ok=True 49 ) 50 51 52 @pytest.fixture(scope="session") 53 def bigquery_client(): 54 bigquery_client = bigquery.Client() 55 return bigquery_client 56 57 58 @pytest.fixture(scope="session") 59 def project_id(bigquery_client): 60 return bigquery_client.project 61 62 63 @pytest.fixture(scope="session") 64 def dataset_id(bigquery_client: bigquery.Client, project_id: str): 65 dataset_id = resource_prefix() 66 full_dataset_id = f"{project_id}.{dataset_id}" 67 dataset = bigquery.Dataset(full_dataset_id) 68 bigquery_client.create_dataset(dataset) 69 yield dataset_id 70 bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True) 71 72 73 @pytest.fixture 74 def bigquery_client_patch(monkeypatch, bigquery_client): 75 monkeypatch.setattr(bigquery, "Client", lambda: bigquery_client) 76 [end of samples/snippets/conftest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/samples/snippets/conftest.py b/samples/snippets/conftest.py --- a/samples/snippets/conftest.py +++ b/samples/snippets/conftest.py @@ -33,7 +33,7 @@ def resource_name_to_date(resource_name: str): start_date = len(RESOURCE_PREFIX) + 1 date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH] - return datetime.strptime(date_string, RESOURCE_DATE_FORMAT) + return datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT) @pytest.fixture(scope="session", autouse=True)
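The one-line fix above is easy to miss in review because the module and the class share a name: strptime is a classmethod of datetime.datetime, not a function of the datetime module. A runnable demonstration of both calls (editor's illustration using the fixture's own format string):

import datetime

stamp = "20210101_120000"
fmt = "%Y%m%d_%H%M%S"  # RESOURCE_DATE_FORMAT from the fixture

try:
    datetime.strptime(stamp, fmt)  # the buggy module-level call
except AttributeError as exc:
    print(f"module-level call fails: {exc}")

print(datetime.datetime.strptime(stamp, fmt).isoformat())  # the fixed call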
{"golden_diff": "diff --git a/samples/snippets/conftest.py b/samples/snippets/conftest.py\n--- a/samples/snippets/conftest.py\n+++ b/samples/snippets/conftest.py\n@@ -33,7 +33,7 @@\n def resource_name_to_date(resource_name: str):\n start_date = len(RESOURCE_PREFIX) + 1\n date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]\n- return datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n+ return datetime.datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n \n \n @pytest.fixture(scope=\"session\", autouse=True)\n", "issue": "AttributeError in `resource_name_to_date()` samples fixture\nSee this samples [test run](https://source.cloud.google.com/results/invocations/e5c424d5-84a6-4505-ae44-3bc70fa94e44/targets/cloud-devrel%2Fclient-libraries%2Fpython%2Fgoogleapis%2Fpython-bigquery%2Fsamples%2Fpython3.6%2Fpresubmit/log). A bug in [this fixture](https://github.com/googleapis/python-bigquery/blob/bd7dbdae5c972b16bafc53c67911eeaa3255a880/samples/snippets/conftest.py#L33-L36) made all of them fail.\r\n\r\n> AttributeError: module 'datetime' has no attribute 'strptime'\r\n\r\nI wonder how this got in in the first place?\r\n\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport random\n\nfrom google.cloud import bigquery\nimport pytest\n\n\nRESOURCE_PREFIX = \"python_bigquery_samples_snippets\"\nRESOURCE_DATE_FORMAT = \"%Y%m%d_%H%M%S\"\nRESOURCE_DATE_LENGTH = 4 + 2 + 2 + 1 + 2 + 2 + 2\n\n\ndef resource_prefix() -> str:\n timestamp = datetime.datetime.utcnow().strftime(RESOURCE_DATE_FORMAT)\n random_string = hex(random.randrange(1000000))[2:]\n return f\"{RESOURCE_PREFIX}_{timestamp}_{random_string}\"\n\n\ndef resource_name_to_date(resource_name: str):\n start_date = len(RESOURCE_PREFIX) + 1\n date_string = resource_name[start_date : start_date + RESOURCE_DATE_LENGTH]\n return datetime.strptime(date_string, RESOURCE_DATE_FORMAT)\n\n\[email protected](scope=\"session\", autouse=True)\ndef cleanup_datasets(bigquery_client: bigquery.Client):\n yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)\n for dataset in bigquery_client.list_datasets():\n if (\n dataset.dataset_id.startswith(RESOURCE_PREFIX)\n and resource_name_to_date(dataset.dataset_id) < yesterday\n ):\n bigquery_client.delete_dataset(\n dataset, delete_contents=True, not_found_ok=True\n )\n\n\[email protected](scope=\"session\")\ndef bigquery_client():\n bigquery_client = bigquery.Client()\n return bigquery_client\n\n\[email protected](scope=\"session\")\ndef project_id(bigquery_client):\n return bigquery_client.project\n\n\[email protected](scope=\"session\")\ndef dataset_id(bigquery_client: bigquery.Client, project_id: str):\n dataset_id = resource_prefix()\n full_dataset_id = f\"{project_id}.{dataset_id}\"\n dataset = bigquery.Dataset(full_dataset_id)\n bigquery_client.create_dataset(dataset)\n yield dataset_id\n bigquery_client.delete_dataset(dataset, delete_contents=True, not_found_ok=True)\n\n\[email protected]\ndef 
bigquery_client_patch(monkeypatch, bigquery_client):\n monkeypatch.setattr(bigquery, \"Client\", lambda: bigquery_client)\n", "path": "samples/snippets/conftest.py"}]}
num_tokens_prompt: 1,461
num_tokens_diff: 133

problem_id: gh_patches_debug_60681
source: rasdani/github-patches
task_type: git_diff
in_source_id: OCHA-DAP__hdx-ckan-1830
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Organization view pages result in 500 error Only on stag. I tested several different orgs. ![image](https://cloud.githubusercontent.com/assets/1654485/5165739/e9046902-73e8-11e4-9358-19cd8652c0c8.png) </issue> <code> [start of ckanext-hdx_search/ckanext/hdx_search/plugin.py] 1 import logging, re 2 import ckan.plugins as plugins 3 import ckan.plugins.toolkit as tk 4 import ckan.lib.plugins as lib_plugins 5 6 def convert_country(q): 7 for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}): 8 if re.findall(c['display_name'].lower(),q.lower()): 9 q += ' '+c['name'] 10 return q 11 12 class HDXSearchPlugin(plugins.SingletonPlugin): 13 plugins.implements(plugins.IConfigurer, inherit=False) 14 plugins.implements(plugins.IRoutes, inherit=True) 15 plugins.implements(plugins.ITemplateHelpers, inherit=False) 16 plugins.implements(plugins.IPackageController, inherit=True) 17 18 def update_config(self, config): 19 tk.add_template_directory(config, 'templates') 20 21 def get_helpers(self): 22 return {} 23 24 def before_map(self, map): 25 map.connect('search', '/search', 26 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search') 27 map.connect('simple_search', 28 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search') 29 return map 30 31 def after_map(self, map): 32 map.connect('search', '/search', 33 controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search') 34 map.connect('simple_search', 35 '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search') 36 return map 37 38 def before_search(self, search_params): 39 search_params['q'] = convert_country(search_params['q']) 40 if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']: 41 search_params['facet.field'].append('vocab_Topics') 42 43 # If indicator flag is set, search only that type 44 if 'ext_indicator' in search_params['extras']: 45 if int(search_params['extras']['ext_indicator']) == 1: 46 search_params['fq'] = search_params['fq'] + ' +extras_indicator:1' 47 elif int(search_params['extras']['ext_indicator']) == 0: 48 search_params['fq'] = search_params[ 49 'fq'] + ' -extras_indicator:1' 50 return search_params 51 52 def after_search(self, search_results, search_params): 53 return search_results 54 55 def before_view(self, pkg_dict): 56 return pkg_dict 57 [end of ckanext-hdx_search/ckanext/hdx_search/plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py --- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py +++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py @@ -36,7 +36,7 @@ return map def before_search(self, search_params): - search_params['q'] = convert_country(search_params['q']) + #search_params['q'] = convert_country(search_params['q']) if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']: search_params['facet.field'].append('vocab_Topics')
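The patch above simply comments out the convert_country call that was breaking organization pages. A slightly more explicit variant of the same hotfix puts the call behind a flag so it can be restored once the underlying failure is understood (editor's illustration; the flag and the simplified signature are hypothetical, not code from the repository):

ENABLE_COUNTRY_EXPANSION = False  # hypothetical kill switch, not in the codebase

def before_search(search_params, convert_country=lambda q: q):
    if ENABLE_COUNTRY_EXPANSION:
        # Re-enable once group_list no longer errors during query expansion.
        search_params["q"] = convert_country(search_params["q"])
    return search_params

print(before_search({"q": "floods"}))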
{"golden_diff": "diff --git a/ckanext-hdx_search/ckanext/hdx_search/plugin.py b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n--- a/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n+++ b/ckanext-hdx_search/ckanext/hdx_search/plugin.py\n@@ -36,7 +36,7 @@\n return map\n \n def before_search(self, search_params):\n- search_params['q'] = convert_country(search_params['q'])\n+ #search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n", "issue": "Organization view pages result in 500 error\nOnly on stag. I tested several different orgs. \n\n![image](https://cloud.githubusercontent.com/assets/1654485/5165739/e9046902-73e8-11e4-9358-19cd8652c0c8.png)\n\n", "before_files": [{"content": "import logging, re\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nimport ckan.lib.plugins as lib_plugins\n\ndef convert_country(q):\n for c in tk.get_action('group_list')({'user':'127.0.0.1'},{'all_fields': True}):\n if re.findall(c['display_name'].lower(),q.lower()):\n q += ' '+c['name']\n return q\n\nclass HDXSearchPlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers, inherit=False)\n plugins.implements(plugins.IPackageController, inherit=True)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def get_helpers(self):\n return {}\n\n def before_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def after_map(self, map):\n map.connect('search', '/search',\n controller='ckanext.hdx_search.controllers.search_controller:HDXSearchController', action='search')\n map.connect('simple_search',\n '/dataset', controller='ckanext.hdx_search.controllers.simple_search_controller:HDXSimpleSearchController', action='package_search')\n return map\n\n def before_search(self, search_params):\n search_params['q'] = convert_country(search_params['q'])\n if 'facet.field' in search_params and 'vocab_Topics' not in search_params['facet.field']:\n search_params['facet.field'].append('vocab_Topics')\n\n # If indicator flag is set, search only that type\n if 'ext_indicator' in search_params['extras']:\n if int(search_params['extras']['ext_indicator']) == 1:\n search_params['fq'] = search_params['fq'] + ' +extras_indicator:1'\n elif int(search_params['extras']['ext_indicator']) == 0:\n search_params['fq'] = search_params[\n 'fq'] + ' -extras_indicator:1'\n return search_params\n\n def after_search(self, search_results, search_params):\n return search_results\n\n def before_view(self, pkg_dict):\n return pkg_dict\n", "path": "ckanext-hdx_search/ckanext/hdx_search/plugin.py"}]}
1,281
169
gh_patches_debug_5673
rasdani/github-patches
git_diff
scikit-hep__pyhf-1546
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Different ordering of channels between model.config.channels and mode.config.channel_nbins # Description We've recently observed that the ordering of channels outputted from `model.config.channels` differs from the order obtained with `model.config.channel_nbins`. This isn't really a bug, but more a quirk which we thought would be worth bringing to the attention of the developers. We ran into some issues when breaking up the `expected_data` list into individual channels by looping through the `model.config.channel_nbins` ordering, rather than the `model.config.channels` ordering (the `model.config.channels` order matches that of the model). Hopefully this issue helps save another user some time in the future, at very least. # Expected Behavior We expected that the order of the channels in the two dictionaries from `model.config.channels` and `model.config.channel_nbins` would be the same. # Actual Behavior The ordering of the channels is different. As an example, we are generating workspaces using two categories, and we're separating our data by year (2016, 2017, and 2018). This gives us six channels total. The outputs are: ``` (Pdb) model.config.channels ['vbf_channel_16_high_cat', 'vbf_channel_16_low_cat', 'vbf_channel_17_high_cat', 'vbf_channel_17_low_cat', 'vbf_channel_18_high_cat', 'vbf_channel_18_low_cat'] (Pdb) model.config.channel_nbins {'vbf_channel_16_low_cat': 12, 'vbf_channel_16_high_cat': 18, 'vbf_channel_17_low_cat': 12, 'vbf_channel_17_high_cat': 18, 'vbf_channel_18_low_cat': 12, 'vbf_channel_18_high_cat': 18} ``` I believe that `model.config.channels` is possibly re-ordering the channels so that the names are in alphabetical order. I have not confirmed this, though. The workspace .json file is filled with the ordering produced by `model.config.channel_nbins`. # Steps to Reproduce I'm using pyhf version 0.6.2 along with python 3.8.8. I can make a dummy workspace for this issue, but I thought that since this is a pretty small issue, it might be overkill. Please let me know if this would be helpful, though. # Checklist - [ ] Run `git fetch` to get the most up to date version of `master` - [X] Searched through existing Issues to confirm this is not a duplicate issue - [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue Thanks! -Rachel </issue> <code> [start of src/pyhf/mixins.py] 1 import logging 2 3 log = logging.getLogger(__name__) 4 5 6 class _ChannelSummaryMixin: 7 """ 8 A mixin that provides summary data of the provided channels. 9 10 This mixin will forward all other information to other classes defined in the Child class. 11 12 Args: 13 **channels: A list of channels to provide summary information about. Follows the `defs.json#/definitions/channel` schema. 
14 """ 15 16 def __init__(self, *args, **kwargs): 17 channels = kwargs.pop('channels') 18 super().__init__(*args, **kwargs) 19 self.channels = [] 20 self.samples = [] 21 self.parameters = [] 22 self.modifiers = [] 23 # keep track of the width of each channel (how many bins) 24 self.channel_nbins = {} 25 # need to keep track in which order we added the constraints 26 # so that we can generate correctly-ordered data 27 for channel in channels: 28 self.channels.append(channel['name']) 29 self.channel_nbins[channel['name']] = len(channel['samples'][0]['data']) 30 for sample in channel['samples']: 31 self.samples.append(sample['name']) 32 for modifier_def in sample['modifiers']: 33 self.parameters.append(modifier_def['name']) 34 self.modifiers.append( 35 ( 36 modifier_def['name'], # mod name 37 modifier_def['type'], # mod type 38 ) 39 ) 40 41 self.channels = sorted(list(set(self.channels))) 42 self.samples = sorted(list(set(self.samples))) 43 self.parameters = sorted(list(set(self.parameters))) 44 self.modifiers = sorted(list(set(self.modifiers))) 45 46 self.channel_slices = {} 47 begin = 0 48 for c in self.channels: 49 end = begin + self.channel_nbins[c] 50 self.channel_slices[c] = slice(begin, end) 51 begin = end 52 [end of src/pyhf/mixins.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyhf/mixins.py b/src/pyhf/mixins.py --- a/src/pyhf/mixins.py +++ b/src/pyhf/mixins.py @@ -42,6 +42,9 @@ self.samples = sorted(list(set(self.samples))) self.parameters = sorted(list(set(self.parameters))) self.modifiers = sorted(list(set(self.modifiers))) + self.channel_nbins = { + channel: self.channel_nbins[channel] for channel in self.channels + } self.channel_slices = {} begin = 0
{"golden_diff": "diff --git a/src/pyhf/mixins.py b/src/pyhf/mixins.py\n--- a/src/pyhf/mixins.py\n+++ b/src/pyhf/mixins.py\n@@ -42,6 +42,9 @@\n self.samples = sorted(list(set(self.samples)))\n self.parameters = sorted(list(set(self.parameters)))\n self.modifiers = sorted(list(set(self.modifiers)))\n+ self.channel_nbins = {\n+ channel: self.channel_nbins[channel] for channel in self.channels\n+ }\n \n self.channel_slices = {}\n begin = 0\n", "issue": "Different ordering of channels between model.config.channels and mode.config.channel_nbins\n# Description\r\n\r\nWe've recently observed that the ordering of channels outputted from `model.config.channels` differs from the order obtained with `model.config.channel_nbins`. This isn't really a bug, but more a quirk which we thought would be worth bringing to the attention of the developers. We ran into some issues when breaking up the `expected_data` list into individual channels by looping through the `model.config.channel_nbins` ordering, rather than the `model.config.channels` ordering (the `model.config.channels` order matches that of the model). Hopefully this issue helps save another user some time in the future, at very least. \r\n\r\n# Expected Behavior\r\n\r\nWe expected that the order of the channels in the two dictionaries from `model.config.channels` and `model.config.channel_nbins` would be the same. \r\n\r\n# Actual Behavior\r\n\r\nThe ordering of the channels is different. As an example, we are generating workspaces using two categories, and we're separating our data by year (2016, 2017, and 2018). This gives us six channels total. The outputs are: \r\n```\r\n(Pdb) model.config.channels\r\n['vbf_channel_16_high_cat', 'vbf_channel_16_low_cat', 'vbf_channel_17_high_cat', 'vbf_channel_17_low_cat', 'vbf_channel_18_high_cat', 'vbf_channel_18_low_cat']\r\n\r\n(Pdb) model.config.channel_nbins\r\n{'vbf_channel_16_low_cat': 12, 'vbf_channel_16_high_cat': 18, 'vbf_channel_17_low_cat': 12, 'vbf_channel_17_high_cat': 18, 'vbf_channel_18_low_cat': 12, 'vbf_channel_18_high_cat': 18}\r\n```\r\nI believe that `model.config.channels` is possibly re-ordering the channels so that the names are in alphabetical order. I have not confirmed this, though. The workspace .json file is filled with the ordering produced by `model.config.channel_nbins`. \r\n\r\n# Steps to Reproduce\r\n\r\nI'm using pyhf version 0.6.2 along with python 3.8.8. \r\nI can make a dummy workspace for this issue, but I thought that since this is a pretty small issue, it might be overkill. Please let me know if this would be helpful, though. \r\n\r\n# Checklist\r\n\r\n- [ ] Run `git fetch` to get the most up to date version of `master`\r\n- [X] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\r\n\r\nThanks! -Rachel \r\n\n", "before_files": [{"content": "import logging\n\nlog = logging.getLogger(__name__)\n\n\nclass _ChannelSummaryMixin:\n \"\"\"\n A mixin that provides summary data of the provided channels.\n\n This mixin will forward all other information to other classes defined in the Child class.\n\n Args:\n **channels: A list of channels to provide summary information about. 
Follows the `defs.json#/definitions/channel` schema.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n channels = kwargs.pop('channels')\n super().__init__(*args, **kwargs)\n self.channels = []\n self.samples = []\n self.parameters = []\n self.modifiers = []\n # keep track of the width of each channel (how many bins)\n self.channel_nbins = {}\n # need to keep track in which order we added the constraints\n # so that we can generate correctly-ordered data\n for channel in channels:\n self.channels.append(channel['name'])\n self.channel_nbins[channel['name']] = len(channel['samples'][0]['data'])\n for sample in channel['samples']:\n self.samples.append(sample['name'])\n for modifier_def in sample['modifiers']:\n self.parameters.append(modifier_def['name'])\n self.modifiers.append(\n (\n modifier_def['name'], # mod name\n modifier_def['type'], # mod type\n )\n )\n\n self.channels = sorted(list(set(self.channels)))\n self.samples = sorted(list(set(self.samples)))\n self.parameters = sorted(list(set(self.parameters)))\n self.modifiers = sorted(list(set(self.modifiers)))\n\n self.channel_slices = {}\n begin = 0\n for c in self.channels:\n end = begin + self.channel_nbins[c]\n self.channel_slices[c] = slice(begin, end)\n begin = end\n", "path": "src/pyhf/mixins.py"}]}
1,630
125
gh_patches_debug_12825
rasdani/github-patches
git_diff
fossasia__open-event-server-4403
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Verify Email endpoint doesn’t work. **I'm submitting a ...** (check one with "x") - [X] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server There are two specific errors in the function. - [x] Firstly the email verification state change isn’t saved in db - [x] Response for the view function is missing. </issue> <code> [start of app/api/auth.py] 1 import base64 2 from flask import request, jsonify, abort, make_response, Blueprint 3 from flask_jwt import current_identity as current_user, jwt_required 4 from sqlalchemy.orm.exc import NoResultFound 5 6 from app import get_settings 7 from app.api.helpers.db import save_to_db 8 from app.api.helpers.files import make_frontend_url 9 from app.api.helpers.mail import send_email_with_action 10 11 from app.api.helpers.utilities import get_serializer 12 from app.models.mail import PASSWORD_RESET 13 from app.models.user import User 14 15 auth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth') 16 17 18 @auth_routes.route('/verify-email', methods=['POST']) 19 def verify_email(): 20 token = base64.b64decode(request.json['data']['token']) 21 s = get_serializer() 22 23 try: 24 data = s.loads(token) 25 except Exception: 26 return abort( 27 make_response(jsonify(error="Invalid Token"), 400) 28 ) 29 30 try: 31 user = User.query.filter_by(email=data[0]).one() 32 except Exception: 33 return abort( 34 make_response(jsonify(error="Invalid Token"), 400) 35 ) 36 else: 37 user.is_verified = True 38 39 40 @auth_routes.route('/reset-password', methods=['POST']) 41 def reset_password_post(): 42 email = request.json['data']['email'] 43 44 try: 45 user = User.query.filter_by(email=email).one() 46 except NoResultFound: 47 return abort( 48 make_response(jsonify(error="User not found"), 404) 49 ) 50 else: 51 link = make_frontend_url('/reset-password', {'token': user.reset_password}) 52 send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link) 53 54 return make_response(jsonify(message="Email Sent"), 200) 55 56 57 @auth_routes.route('/reset-password', methods=['PATCH']) 58 def reset_password_patch(): 59 token = request.json['data']['token'] 60 password = request.json['data']['password'] 61 62 try: 63 user = User.query.filter_by(reset_password=token).one() 64 except NoResultFound: 65 return abort( 66 make_response(jsonify(error="User not found"), 404) 67 ) 68 else: 69 user.password = password 70 save_to_db(user) 71 72 return jsonify({ 73 "id": user.id, 74 "email": user.email, 75 "name": user.name if user.get('name') else None 76 }) 77 78 79 @auth_routes.route('/change-password', methods=['POST']) 80 @jwt_required() 81 def change_password(): 82 old_password = request.json['data']['old-password'] 83 new_password = request.json['data']['new-password'] 84 85 try: 86 user = User.query.filter_by(id=current_user.id).one() 87 except NoResultFound: 88 return abort( 89 make_response(jsonify(error="User not found"), 404) 90 ) 91 else: 92 if user.is_correct_password(old_password): 93 94 user.password = new_password 95 save_to_db(user) 96 else: 97 return make_response(jsonify(error="Wrong Password"), 400) 98 99 return jsonify({ 100 "id": user.id, 101 "email": user.email, 102 "name": user.fullname if user.fullname else None, 103 "password_changed": True 104 }) 105 [end of app/api/auth.py] 
</code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/auth.py b/app/api/auth.py --- a/app/api/auth.py +++ b/app/api/auth.py @@ -35,6 +35,8 @@ ) else: user.is_verified = True + save_to_db(user) + return make_response(jsonify(message="Email Verified"), 200) @auth_routes.route('/reset-password', methods=['POST']) @@ -94,7 +96,9 @@ user.password = new_password save_to_db(user) else: - return make_response(jsonify(error="Wrong Password"), 400) + return abort( + make_response(jsonify(error="Wrong Password"), 400) + ) return jsonify({ "id": user.id,
{"golden_diff": "diff --git a/app/api/auth.py b/app/api/auth.py\n--- a/app/api/auth.py\n+++ b/app/api/auth.py\n@@ -35,6 +35,8 @@\n )\n else:\n user.is_verified = True\n+ save_to_db(user)\n+ return make_response(jsonify(message=\"Email Verified\"), 200)\n \n \n @auth_routes.route('/reset-password', methods=['POST'])\n@@ -94,7 +96,9 @@\n user.password = new_password\n save_to_db(user)\n else:\n- return make_response(jsonify(error=\"Wrong Password\"), 400)\n+ return abort(\n+ make_response(jsonify(error=\"Wrong Password\"), 400)\n+ )\n \n return jsonify({\n \"id\": user.id,\n", "issue": " Verify Email endpoint doesn\u2019t work.\n**I'm submitting a ...** (check one with \"x\")\r\n- [X] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\nThere are two specific errors in the function.\r\n- [x] Firstly the email verification state change isn\u2019t saved in db\r\n- [x] Response for the view function is missing.\r\n\n", "before_files": [{"content": "import base64\nfrom flask import request, jsonify, abort, make_response, Blueprint\nfrom flask_jwt import current_identity as current_user, jwt_required\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app import get_settings\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_with_action\n\nfrom app.api.helpers.utilities import get_serializer\nfrom app.models.mail import PASSWORD_RESET\nfrom app.models.user import User\n\nauth_routes = Blueprint('auth', __name__, url_prefix='/v1/auth')\n\n\n@auth_routes.route('/verify-email', methods=['POST'])\ndef verify_email():\n token = base64.b64decode(request.json['data']['token'])\n s = get_serializer()\n\n try:\n data = s.loads(token)\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n\n try:\n user = User.query.filter_by(email=data[0]).one()\n except Exception:\n return abort(\n make_response(jsonify(error=\"Invalid Token\"), 400)\n )\n else:\n user.is_verified = True\n\n\n@auth_routes.route('/reset-password', methods=['POST'])\ndef reset_password_post():\n email = request.json['data']['email']\n\n try:\n user = User.query.filter_by(email=email).one()\n except NoResultFound:\n return abort(\n make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n link = make_frontend_url('/reset-password', {'token': user.reset_password})\n send_email_with_action(user, PASSWORD_RESET, app_name=get_settings()['app_name'], link=link)\n\n return make_response(jsonify(message=\"Email Sent\"), 200)\n\n\n@auth_routes.route('/reset-password', methods=['PATCH'])\ndef reset_password_patch():\n token = request.json['data']['token']\n password = request.json['data']['password']\n\n try:\n user = User.query.filter_by(reset_password=token).one()\n except NoResultFound:\n return abort(\n make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n user.password = password\n save_to_db(user)\n\n return jsonify({\n \"id\": user.id,\n \"email\": user.email,\n \"name\": user.name if user.get('name') else None\n })\n\n\n@auth_routes.route('/change-password', methods=['POST'])\n@jwt_required()\ndef change_password():\n old_password = request.json['data']['old-password']\n new_password = request.json['data']['new-password']\n\n try:\n user = User.query.filter_by(id=current_user.id).one()\n except NoResultFound:\n return abort(\n 
make_response(jsonify(error=\"User not found\"), 404)\n )\n else:\n if user.is_correct_password(old_password):\n\n user.password = new_password\n save_to_db(user)\n else:\n return make_response(jsonify(error=\"Wrong Password\"), 400)\n\n return jsonify({\n \"id\": user.id,\n \"email\": user.email,\n \"name\": user.fullname if user.fullname else None,\n \"password_changed\": True\n })\n", "path": "app/api/auth.py"}]}
1,553
171
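A minimal runnable sketch of the two fixes in the golden diff above — persisting the verification flag and returning an explicit response — with the app's real helpers (the token lookup, the `User` model, `save_to_db`) replaced by hypothetical stand-ins:

```python
from flask import Flask, jsonify, make_response

app = Flask(__name__)

# Hypothetical in-memory stand-ins for the app's User model and db session.
USERS = {"a@example.com": {"is_verified": False}}

def save_to_db(user):
    """Placeholder for app.api.helpers.db.save_to_db (would commit the session)."""

@app.route("/v1/auth/verify-email", methods=["POST"])
def verify_email():
    user = USERS["a@example.com"]  # token decoding and lookup elided for brevity
    user["is_verified"] = True
    save_to_db(user)  # fix 1: the state change is actually written to the db
    return make_response(jsonify(message="Email Verified"), 200)  # fix 2: a response is returned
```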
gh_patches_debug_7577
rasdani/github-patches
git_diff
webkom__lego-24
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add shell pluss and django extensions </issue> <code> [start of lego/settings/base.py] 1 import sys 2 import os 3 BASE_DIR = os.path.dirname(os.path.dirname(__file__)) 4 5 SECRET_KEY = 'This is supersecret' 6 7 TESTING = 'test' in sys.argv # Check if manage.py test has been run 8 9 DEBUG = True 10 TEMPLATE_DEBUG = True 11 ALLOWED_HOSTS = [] 12 13 AUTH_USER_MODEL = 'users.User' 14 15 INSTALLED_APPS = ( 16 'django.contrib.admin', 17 'django.contrib.auth', 18 'django.contrib.contenttypes', 19 'django.contrib.sessions', 20 'django.contrib.messages', 21 'django.contrib.staticfiles', 22 23 'oauth2_provider', 24 'rest_framework', 25 26 'lego.apps.LegoConfig', 27 'lego.users', 28 29 'lego.app.oauth', 30 ) 31 32 AUTHENTICATION_BACKEND = ( 33 'oauth2_provider.backends.OAuth2Backend' 34 ) 35 36 MIDDLEWARE_CLASSES = ( 37 'django.contrib.sessions.middleware.SessionMiddleware', 38 'django.middleware.common.CommonMiddleware', 39 'django.middleware.csrf.CsrfViewMiddleware', 40 'oauth2_provider.middleware.OAuth2TokenMiddleware', 41 'django.contrib.auth.middleware.AuthenticationMiddleware', 42 'django.contrib.messages.middleware.MessageMiddleware', 43 'django.middleware.clickjacking.XFrameOptionsMiddleware', 44 ) 45 46 OAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth.APIApplication' 47 48 ROOT_URLCONF = 'lego.urls' 49 50 WSGI_APPLICATION = 'lego.wsgi.application' 51 52 LANGUAGE_CODE = 'en-us' 53 TIME_ZONE = 'UTC' 54 USE_I18N = True 55 USE_L10N = True 56 USE_TZ = True 57 DEBUG = True 58 59 STATIC_URL = '/static/' 60 STATIC_ROOT = os.path.join(BASE_DIR, '../static') 61 MEDIA_URL = '/uploads/' 62 MEDIA_ROOT = os.path.join(BASE_DIR, '../uploads') 63 64 TEMPLATE_DIRS = ( 65 os.path.join(BASE_DIR, 'templates'), 66 ) 67 [end of lego/settings/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lego/settings/base.py b/lego/settings/base.py --- a/lego/settings/base.py +++ b/lego/settings/base.py @@ -6,6 +6,8 @@ TESTING = 'test' in sys.argv # Check if manage.py test has been run +SHELL_PLUS = "ipython" + DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] @@ -20,6 +22,8 @@ 'django.contrib.messages', 'django.contrib.staticfiles', + 'django_extensions', + 'oauth2_provider', 'rest_framework',
{"golden_diff": "diff --git a/lego/settings/base.py b/lego/settings/base.py\n--- a/lego/settings/base.py\n+++ b/lego/settings/base.py\n@@ -6,6 +6,8 @@\n \n TESTING = 'test' in sys.argv # Check if manage.py test has been run\n \n+SHELL_PLUS = \"ipython\"\n+\n DEBUG = True\n TEMPLATE_DEBUG = True\n ALLOWED_HOSTS = []\n@@ -20,6 +22,8 @@\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n \n+ 'django_extensions',\n+\n 'oauth2_provider',\n 'rest_framework',\n", "issue": "Add shell pluss and django extensions\n\n", "before_files": [{"content": "import sys\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = 'This is supersecret'\n\nTESTING = 'test' in sys.argv # Check if manage.py test has been run\n\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\n\nAUTH_USER_MODEL = 'users.User'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'oauth2_provider',\n 'rest_framework',\n\n 'lego.apps.LegoConfig',\n 'lego.users',\n\n 'lego.app.oauth',\n)\n\nAUTHENTICATION_BACKEND = (\n 'oauth2_provider.backends.OAuth2Backend'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'oauth2_provider.middleware.OAuth2TokenMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nOAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth.APIApplication'\n\nROOT_URLCONF = 'lego.urls'\n\nWSGI_APPLICATION = 'lego.wsgi.application'\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nDEBUG = True\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, '../static')\nMEDIA_URL = '/uploads/'\nMEDIA_ROOT = os.path.join(BASE_DIR, '../uploads')\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n", "path": "lego/settings/base.py"}]}
1,048
135
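The change amounts to two settings entries. A sketch of the relevant fragment of a Django settings module, assuming `django-extensions` and `ipython` are installed in the environment:

```python
# settings.py fragment — shell_plus is provided by the django-extensions package.
SHELL_PLUS = "ipython"  # make `manage.py shell_plus` start an IPython shell

INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django_extensions",  # registers the shell_plus management command
    # ... project apps ...
)
```

`shell_plus` then auto-imports every registered model when the shell starts, which is the usual motivation for adding it.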
gh_patches_debug_25227
rasdani/github-patches
git_diff
Textualize__textual-584
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lazy load Widgets We have a `widgets` module that imports all the widgets. This allows for easy imports like: ```python from textual.widgets import DataTable, Button ``` The downside is that all widgets are imported even if you only need one, increasing startup time. I think we should be able to do this lazily somehow, possibly by implementing a module-level `__getattr__`. </issue> <code> [start of src/textual/widgets/__init__.py] 1 from ._footer import Footer 2 from ._header import Header 3 from ._button import Button 4 from ._placeholder import Placeholder 5 from ._static import Static 6 from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID 7 from ._directory_tree import DirectoryTree, FileClick 8 9 __all__ = [ 10 "Button", 11 "DirectoryTree", 12 "FileClick", 13 "Footer", 14 "Header", 15 "Placeholder", 16 "Static", 17 "TreeClick", 18 "TreeControl", 19 "TreeNode", 20 "NodeID", 21 ] 22 [end of src/textual/widgets/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/textual/widgets/__init__.py b/src/textual/widgets/__init__.py --- a/src/textual/widgets/__init__.py +++ b/src/textual/widgets/__init__.py @@ -1,21 +1,41 @@ -from ._footer import Footer -from ._header import Header -from ._button import Button -from ._placeholder import Placeholder -from ._static import Static -from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID -from ._directory_tree import DirectoryTree, FileClick +from __future__ import annotations +from importlib import import_module +import typing +from ..case import camel_to_snake + +if typing.TYPE_CHECKING: + from ..widget import Widget + + +# ⚠️For any new built-in Widget we create, not only we have to add them to the following list, but also to the +# `__init__.pyi` file in this same folder - otherwise text editors and type checkers won't be able to "see" them. __all__ = [ "Button", "DirectoryTree", - "FileClick", "Footer", "Header", "Placeholder", "Static", - "TreeClick", "TreeControl", - "TreeNode", - "NodeID", ] + + +_WIDGETS_LAZY_LOADING_CACHE: dict[str, type[Widget]] = {} + +# Let's decrease startup time by lazy loading our Widgets: +def __getattr__(widget_class: str) -> type[Widget]: + try: + return _WIDGETS_LAZY_LOADING_CACHE[widget_class] + except KeyError: + pass + + if widget_class not in __all__: + raise ImportError(f"Package 'textual.widgets' has no class '{widget_class}'") + + widget_module_path = f"._{camel_to_snake(widget_class)}" + module = import_module(widget_module_path, package="textual.widgets") + class_ = getattr(module, widget_class) + + _WIDGETS_LAZY_LOADING_CACHE[widget_class] = class_ + return class_
{"golden_diff": "diff --git a/src/textual/widgets/__init__.py b/src/textual/widgets/__init__.py\n--- a/src/textual/widgets/__init__.py\n+++ b/src/textual/widgets/__init__.py\n@@ -1,21 +1,41 @@\n-from ._footer import Footer\n-from ._header import Header\n-from ._button import Button\n-from ._placeholder import Placeholder\n-from ._static import Static\n-from ._tree_control import TreeControl, TreeClick, TreeNode, NodeID\n-from ._directory_tree import DirectoryTree, FileClick\n+from __future__ import annotations\n+from importlib import import_module\n+import typing\n \n+from ..case import camel_to_snake\n+\n+if typing.TYPE_CHECKING:\n+ from ..widget import Widget\n+\n+\n+# \u26a0\ufe0fFor any new built-in Widget we create, not only we have to add them to the following list, but also to the\n+# `__init__.pyi` file in this same folder - otherwise text editors and type checkers won't be able to \"see\" them.\n __all__ = [\n \"Button\",\n \"DirectoryTree\",\n- \"FileClick\",\n \"Footer\",\n \"Header\",\n \"Placeholder\",\n \"Static\",\n- \"TreeClick\",\n \"TreeControl\",\n- \"TreeNode\",\n- \"NodeID\",\n ]\n+\n+\n+_WIDGETS_LAZY_LOADING_CACHE: dict[str, type[Widget]] = {}\n+\n+# Let's decrease startup time by lazy loading our Widgets:\n+def __getattr__(widget_class: str) -> type[Widget]:\n+ try:\n+ return _WIDGETS_LAZY_LOADING_CACHE[widget_class]\n+ except KeyError:\n+ pass\n+\n+ if widget_class not in __all__:\n+ raise ImportError(f\"Package 'textual.widgets' has no class '{widget_class}'\")\n+\n+ widget_module_path = f\"._{camel_to_snake(widget_class)}\"\n+ module = import_module(widget_module_path, package=\"textual.widgets\")\n+ class_ = getattr(module, widget_class)\n+\n+ _WIDGETS_LAZY_LOADING_CACHE[widget_class] = class_\n+ return class_\n", "issue": "Lazy load Widgets\nWe have a `widgets` module that imports all the widgets. THis allows for easy imports like:\r\n\r\n```python\r\nfrom textual.widgets import DataTable, Button\r\n```\r\n\r\nThe downside is that all widgets are imported even if you only need one, increasing startup time.\r\n\r\nI think we should be able to do this lazily some how, possibly by implementing a module level `__getattr__`.\n", "before_files": [{"content": "from ._footer import Footer\nfrom ._header import Header\nfrom ._button import Button\nfrom ._placeholder import Placeholder\nfrom ._static import Static\nfrom ._tree_control import TreeControl, TreeClick, TreeNode, NodeID\nfrom ._directory_tree import DirectoryTree, FileClick\n\n__all__ = [\n \"Button\",\n \"DirectoryTree\",\n \"FileClick\",\n \"Footer\",\n \"Header\",\n \"Placeholder\",\n \"Static\",\n \"TreeClick\",\n \"TreeControl\",\n \"TreeNode\",\n \"NodeID\",\n]\n", "path": "src/textual/widgets/__init__.py"}]}
772
459
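A self-contained sketch of the module-level `__getattr__` pattern the issue asks for and the diff implements (PEP 562, Python ≥ 3.7), meant to live in a package's `__init__.py`; the naive lowercase conversion stands in for the real `camel_to_snake` helper:

```python
from importlib import import_module

__all__ = ["Button", "Footer", "Header"]

_cache = {}

def __getattr__(name):  # invoked only when a normal attribute lookup fails
    if name not in __all__:
        # (the merged diff raises ImportError here; AttributeError is the PEP 562 default)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    if name not in _cache:
        # e.g. Button -> ._button; lowercase is a naive stand-in for camel_to_snake()
        module = import_module(f"._{name.lower()}", package=__package__)
        _cache[name] = getattr(module, name)
    return _cache[name]
```

An import like `from textual.widgets import Button` then triggers the loader once and hits `_cache` afterwards, so widgets that are never referenced are never imported.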
gh_patches_debug_14294
rasdani/github-patches
git_diff
scikit-image__scikit-image-4172
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IO: unhandled exception, in case the URL cannot be opened ## Description ![image](https://user-images.githubusercontent.com/30770221/53887634-4a1eb600-3fe8-11e9-97db-f374815eb620.png) ## Way to reproduce ```python def url_to_image(url): """download image from url and return it""" im_mat = io.imread(url) image = cv2.imdecode(im_mat, cv2.IMREAD_COLOR) # return the image return image ``` ## Version information ```python >>> from __future__ import print_function >>> import sys; print(sys.version) 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 23:09:28) [MSC v.1916 64 bit (AMD64)] >>> import platform; print(platform.platform()) Windows-10-10.0.17134-SP0 >>> import skimage; print("scikit-image version: {}".format(skimage.__version__)) scikit-image version: 0.14.2 >>> import numpy; print("numpy version: {}".format(numpy.__version__)) ``` ```python Traceback (most recent call last): File "C:\Python37\lib\site-packages\skimage\io\util.py", line 28, in file_or_url_context with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: File "C:\Python37\lib\tempfile.py", line 547, in NamedTemporaryFile (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type) File "C:\Python37\lib\tempfile.py", line 258, in _mkstemp_inner fd = _os.open(file, flags, 0o600) OSError: [Errno 22] Invalid argument: 'C:\\Users\\kelvin\\AppData\\Local\\Temp\\tmpegjt2y8r.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "imagecheck.py", line 28, in <module> print(url_to_image("http://www.movable-ink-6437.com/p/rp/0fd853751b1edbb0.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN")) File "imagecheck.py", line 9, in url_to_image im_mat = io.imread(url) File "C:\Python37\lib\site-packages\skimage\io\_io.py", line 61, in imread with file_or_url_context(fname) as fname: File "C:\Python37\lib\contextlib.py", line 112, in __enter__ return next(self.gen) File "C:\Python37\lib\site-packages\skimage\io\util.py", line 34, in file_or_url_context os.remove(f.name) UnboundLocalError: local variable 'f' referenced before assignment ``` </issue> <code> [start of skimage/io/util.py] 1 import urllib.parse 2 import urllib.request 3 4 import os 5 import re 6 import tempfile 7 from contextlib import contextmanager 8 9 10 URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\') 11 12 13 def is_url(filename): 14 """Return True if string is an http or ftp path.""" 15 return (isinstance(filename, str) and 16 URL_REGEX.match(filename) is not None) 17 18 19 @contextmanager 20 def file_or_url_context(resource_name): 21 """Yield name of file from the given resource (i.e. file or url).""" 22 if is_url(resource_name): 23 url_components = urllib.parse.urlparse(resource_name) 24 _, ext = os.path.splitext(url_components.path) 25 try: 26 with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: 27 u = urllib.request.urlopen(resource_name) 28 f.write(u.read()) 29 # f must be closed before yielding 30 yield f.name 31 finally: 32 os.remove(f.name) 33 else: 34 yield resource_name 35 [end of skimage/io/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/io/util.py b/skimage/io/util.py --- a/skimage/io/util.py +++ b/skimage/io/util.py @@ -1,5 +1,6 @@ import urllib.parse import urllib.request +from urllib.error import URLError, HTTPError import os import re @@ -28,7 +29,15 @@ f.write(u.read()) # f must be closed before yielding yield f.name - finally: + except (URLError, HTTPError): + # could not open URL + os.remove(f.name) + raise + except (FileNotFoundError, FileExistsError, + PermissionError, BaseException): + # could not create temporary file + raise + else: os.remove(f.name) else: yield resource_name
{"golden_diff": "diff --git a/skimage/io/util.py b/skimage/io/util.py\n--- a/skimage/io/util.py\n+++ b/skimage/io/util.py\n@@ -1,5 +1,6 @@\n import urllib.parse\n import urllib.request\n+from urllib.error import URLError, HTTPError\n \n import os\n import re\n@@ -28,7 +29,15 @@\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n- finally:\n+ except (URLError, HTTPError):\n+ # could not open URL\n+ os.remove(f.name)\n+ raise\n+ except (FileNotFoundError, FileExistsError,\n+ PermissionError, BaseException):\n+ # could not create temporary file\n+ raise\n+ else:\n os.remove(f.name)\n else:\n yield resource_name\n", "issue": "IO: unhandled exception, in case the URL cannot be opened\n## Description\r\n![image](https://user-images.githubusercontent.com/30770221/53887634-4a1eb600-3fe8-11e9-97db-f374815eb620.png)\r\n\r\n\r\n## Way to reproduce\r\n```python\r\ndef url_to_image(url):\r\n \"\"\"download image from url and return it\"\"\"\r\n im_mat = io.imread(url)\r\n image = cv2.imdecode(im_mat, cv2.IMREAD_COLOR)\r\n \r\n # return the image\r\n return image\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n>>> from __future__ import print_function\r\n>>> import sys; print(sys.version)\r\n3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 23:09:28) [MSC v.1916 64 bit (AMD64)]\r\n>>> import platform; print(platform.platform())\r\nWindows-10-10.0.17134-SP0\r\n>>> import skimage; print(\"scikit-image version: {}\".format(skimage.__version__))\r\nscikit-image version: 0.14.2\r\n>>> import numpy; print(\"numpy version: {}\".format(numpy.__version__))\r\n```\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\util.py\", line 28, in file_or_url_context\r\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\r\n File \"C:\\Python37\\lib\\tempfile.py\", line 547, in NamedTemporaryFile\r\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"C:\\Python37\\lib\\tempfile.py\", line 258, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nOSError: [Errno 22] Invalid argument: 'C:\\\\Users\\\\kelvin\\\\AppData\\\\Local\\\\Temp\\\\tmpegjt2y8r.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"imagecheck.py\", line 28, in <module>\r\n print(url_to_image(\"http://www.movable-ink-6437.com/p/rp/0fd853751b1edbb0.png?mi_guid=CFCDF62B5115D31389F2893D3CCFFA5CB42BDE7F8CD696BB57B97B8BB567940F&mi_language=EN\"))\r\n File \"imagecheck.py\", line 9, in url_to_image\r\n im_mat = io.imread(url)\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\_io.py\", line 61, in imread\r\n with file_or_url_context(fname) as fname:\r\n File \"C:\\Python37\\lib\\contextlib.py\", line 112, in __enter__\r\n return next(self.gen)\r\n File \"C:\\Python37\\lib\\site-packages\\skimage\\io\\util.py\", line 34, in file_or_url_context\r\n os.remove(f.name)\r\nUnboundLocalError: local variable 'f' referenced before assignment\r\n```\r\n\r\n\n", "before_files": [{"content": "import urllib.parse\nimport urllib.request\n\nimport os\nimport re\nimport tempfile\nfrom contextlib import contextmanager\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http or ftp path.\"\"\"\n return (isinstance(filename, str) and\n URL_REGEX.match(filename) is not 
None)\n\n\n@contextmanager\ndef file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n url_components = urllib.parse.urlparse(resource_name)\n _, ext = os.path.splitext(url_components.path)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n u = urllib.request.urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n finally:\n os.remove(f.name)\n else:\n yield resource_name\n", "path": "skimage/io/util.py"}]}
1,656
188
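The crash chain in the traceback has two layers: `NamedTemporaryFile` fails on the query-string suffix, and the cleanup then references `f` before assignment. A simplified sketch of the guarded shape the patch adopts — `f` is only touched on paths where it is bound, and URL errors re-raise cleanly; note that neither the merged patch nor this sketch sanitizes the query-string suffix, they only make the failures explicit:

```python
import os
import tempfile
import urllib.request
from contextlib import contextmanager
from urllib.error import HTTPError, URLError

@contextmanager
def file_or_url_context(url, suffix=".png"):
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as f:
            f.write(urllib.request.urlopen(url).read())
        yield f.name           # f is guaranteed to be bound on this path
    except (URLError, HTTPError):
        os.remove(f.name)      # download failed after the file was created
        raise
    else:
        os.remove(f.name)
```

If `NamedTemporaryFile` itself raises (the invalid-suffix case), the exception propagates without ever referencing `f`, which is exactly the `UnboundLocalError` the issue reports being fixed.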
gh_patches_debug_33372
rasdani/github-patches
git_diff
rasterio__rasterio-241
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rio merge assumes nodata Running `rio merge` over a set of images without a nodata value results in an error: ``` (rio-test)$ rio merge warped.tif merged.tif ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 50, in merge dest.fill(nodataval) TypeError: long() argument must be a string or a number, not 'NoneType' ``` Checking for nodata in `rasterio/rio/merge.py` still results in a lower level error (https://github.com/mapbox/rasterio/commit/6b251f8261fd1b3e785dc73462212741b6bd62b7): ``` (rio-test)$ rio merge warped.tif merged.tif ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 55, in merge data = src.read() File "rasterio/_io.pyx", line 654, in rasterio._io.RasterReader.read (rasterio/_io.c:8075) arr.fill(ndv) TypeError: long() argument must be a string or a number, not 'NoneType' ``` Patching this case throws the error down further until a regular numpy array is confused for a masked array (https://github.com/mapbox/rasterio/commit/c271c4ee23531db7c24208c85f56d04d79acc851): ``` (rio-test)$ rio merge warped.tif merged.tif /Users/amit/Mapbox/rasterio/rasterio/rio/merge.py:58: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future. dest==nodataval, data.mask==False)) ERROR:rio:Failed. Exception caught Traceback (most recent call last): File "/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py", line 58, in merge dest==nodataval, data.mask==False)) AttributeError: 'numpy.ndarray' object has no attribute 'mask' ``` /cc @sgillies </issue> <code> [start of rasterio/rio/merge.py] 1 # Merge command. 2 3 import logging 4 import os.path 5 import sys 6 7 import click 8 from cligj import files_inout_arg, format_opt 9 10 import rasterio 11 12 from rasterio.rio.cli import cli 13 14 15 @cli.command(short_help="Merge a stack of raster datasets.") 16 @files_inout_arg 17 @format_opt 18 @click.pass_context 19 def merge(ctx, files, driver): 20 """Copy valid pixels from input files to an output file. 21 22 All files must have the same shape, number of bands, and data type. 23 24 Input files are merged in their listed order using a reverse 25 painter's algorithm. 26 """ 27 import numpy as np 28 29 verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1 30 logger = logging.getLogger('rio') 31 try: 32 with rasterio.drivers(CPL_DEBUG=verbosity>2): 33 output = files[-1] 34 files = files[:-1] 35 36 with rasterio.open(files[0]) as first: 37 kwargs = first.meta 38 kwargs['transform'] = kwargs.pop('affine') 39 dest = np.empty((first.count,) + first.shape, 40 dtype=first.dtypes[0]) 41 42 if os.path.exists(output): 43 dst = rasterio.open(output, 'r+') 44 nodataval = dst.nodatavals[0] 45 else: 46 kwargs['driver'] == driver 47 dst = rasterio.open(output, 'w', **kwargs) 48 nodataval = first.nodatavals[0] 49 50 dest.fill(nodataval) 51 52 for fname in reversed(files): 53 with rasterio.open(fname) as src: 54 data = src.read() 55 np.copyto(dest, data, 56 where=np.logical_and( 57 dest==nodataval, data.mask==False)) 58 59 if dst.mode == 'r+': 60 data = dst.read() 61 np.copyto(dest, data, 62 where=np.logical_and( 63 dest==nodataval, data.mask==False)) 64 65 dst.write(dest) 66 dst.close() 67 68 sys.exit(0) 69 except Exception: 70 logger.exception("Failed. 
Exception caught") 71 sys.exit(1) 72 [end of rasterio/rio/merge.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py --- a/rasterio/rio/merge.py +++ b/rasterio/rio/merge.py @@ -36,31 +36,39 @@ with rasterio.open(files[0]) as first: kwargs = first.meta kwargs['transform'] = kwargs.pop('affine') - dest = np.empty((first.count,) + first.shape, + dest = np.zeros((first.count,) + first.shape, dtype=first.dtypes[0]) + nodataval = next(dest.flat) if os.path.exists(output): dst = rasterio.open(output, 'r+') - nodataval = dst.nodatavals[0] + nodataval = dst.nodatavals[0] or nodataval else: kwargs['driver'] == driver dst = rasterio.open(output, 'w', **kwargs) - nodataval = first.nodatavals[0] + nodataval = first.nodatavals[0] or nodataval - dest.fill(nodataval) + if nodataval: + dest.fill(nodataval) for fname in reversed(files): with rasterio.open(fname) as src: data = src.read() - np.copyto(dest, data, - where=np.logical_and( - dest==nodataval, data.mask==False)) + try: + where = np.logical_and( + dest==nodataval, data.mask==False) + except AttributeError: + where = dest==nodataval + np.copyto(dest, data, where=where) if dst.mode == 'r+': data = dst.read() - np.copyto(dest, data, - where=np.logical_and( - dest==nodataval, data.mask==False)) + try: + where = np.logical_and( + dest==nodataval, data.mask==False) + except AttributeError: + where = dest==nodataval + np.copyto(dest, data, where=where) dst.write(dest) dst.close()
{"golden_diff": "diff --git a/rasterio/rio/merge.py b/rasterio/rio/merge.py\n--- a/rasterio/rio/merge.py\n+++ b/rasterio/rio/merge.py\n@@ -36,31 +36,39 @@\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs['transform'] = kwargs.pop('affine')\n- dest = np.empty((first.count,) + first.shape, \n+ dest = np.zeros((first.count,) + first.shape, \n dtype=first.dtypes[0])\n \n+ nodataval = next(dest.flat)\n if os.path.exists(output):\n dst = rasterio.open(output, 'r+')\n- nodataval = dst.nodatavals[0]\n+ nodataval = dst.nodatavals[0] or nodataval\n else:\n kwargs['driver'] == driver\n dst = rasterio.open(output, 'w', **kwargs)\n- nodataval = first.nodatavals[0]\n+ nodataval = first.nodatavals[0] or nodataval\n \n- dest.fill(nodataval)\n+ if nodataval:\n+ dest.fill(nodataval)\n \n for fname in reversed(files):\n with rasterio.open(fname) as src:\n data = src.read()\n- np.copyto(dest, data,\n- where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ try:\n+ where = np.logical_and(\n+ dest==nodataval, data.mask==False)\n+ except AttributeError:\n+ where = dest==nodataval\n+ np.copyto(dest, data, where=where)\n \n if dst.mode == 'r+':\n data = dst.read()\n- np.copyto(dest, data,\n- where=np.logical_and(\n- dest==nodataval, data.mask==False))\n+ try:\n+ where = np.logical_and(\n+ dest==nodataval, data.mask==False)\n+ except AttributeError:\n+ where = dest==nodataval\n+ np.copyto(dest, data, where=where)\n \n dst.write(dest)\n dst.close()\n", "issue": "rio merge assumes nodata\nRunning `rio merge` over a set of images without a nodata value results in an error:\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\nERROR:rio:Failed. Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 50, in merge\n dest.fill(nodataval)\nTypeError: long() argument must be a string or a number, not 'NoneType'\n```\n\nChecking for nodata in `rasterio/rio/merge.py` still results in a lower level error (https://github.com/mapbox/rasterio/commit/6b251f8261fd1b3e785dc73462212741b6bd62b7):\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\nERROR:rio:Failed. Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 55, in merge\n data = src.read()\n File \"rasterio/_io.pyx\", line 654, in rasterio._io.RasterReader.read (rasterio/_io.c:8075)\n arr.fill(ndv)\nTypeError: long() argument must be a string or a number, not 'NoneType'\n```\n\nPatching this case throws the error down further until a regular numpy array is confused for a masked array (https://github.com/mapbox/rasterio/commit/c271c4ee23531db7c24208c85f56d04d79acc851):\n\n```\n(rio-test)$ rio merge warped.tif merged.tif\n/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py:58: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.\n dest==nodataval, data.mask==False))\nERROR:rio:Failed. 
Exception caught\nTraceback (most recent call last):\n File \"/Users/amit/Mapbox/rasterio/rasterio/rio/merge.py\", line 58, in merge\n dest==nodataval, data.mask==False))\nAttributeError: 'numpy.ndarray' object has no attribute 'mask'\n```\n\n/cc @sgillies \n\n", "before_files": [{"content": "# Merge command.\n\nimport logging\nimport os.path\nimport sys\n\nimport click\nfrom cligj import files_inout_arg, format_opt\n\nimport rasterio\n\nfrom rasterio.rio.cli import cli\n\n\[email protected](short_help=\"Merge a stack of raster datasets.\")\n@files_inout_arg\n@format_opt\[email protected]_context\ndef merge(ctx, files, driver):\n \"\"\"Copy valid pixels from input files to an output file.\n\n All files must have the same shape, number of bands, and data type.\n\n Input files are merged in their listed order using a reverse\n painter's algorithm.\n \"\"\"\n import numpy as np\n\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n try:\n with rasterio.drivers(CPL_DEBUG=verbosity>2):\n output = files[-1]\n files = files[:-1]\n\n with rasterio.open(files[0]) as first:\n kwargs = first.meta\n kwargs['transform'] = kwargs.pop('affine')\n dest = np.empty((first.count,) + first.shape, \n dtype=first.dtypes[0])\n\n if os.path.exists(output):\n dst = rasterio.open(output, 'r+')\n nodataval = dst.nodatavals[0]\n else:\n kwargs['driver'] == driver\n dst = rasterio.open(output, 'w', **kwargs)\n nodataval = first.nodatavals[0]\n\n dest.fill(nodataval)\n\n for fname in reversed(files):\n with rasterio.open(fname) as src:\n data = src.read()\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n if dst.mode == 'r+':\n data = dst.read()\n np.copyto(dest, data,\n where=np.logical_and(\n dest==nodataval, data.mask==False))\n\n dst.write(dest)\n dst.close()\n\n sys.exit(0)\n except Exception:\n logger.exception(\"Failed. Exception caught\")\n sys.exit(1)\n", "path": "rasterio/rio/merge.py"}]}
1,670
492
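A small standalone sketch of the two moves in the fix: start from `np.zeros` when no nodata value is defined, and fall back to a plain equality mask when `read()` returns an unmasked array. Shapes and values here are illustrative, not rasterio's API:

```python
import numpy as np

def paint(dest, data, nodataval):
    """Reverse-painter step: only write where dest still holds nodata."""
    try:
        where = np.logical_and(dest == nodataval, data.mask == False)  # noqa: E712
    except AttributeError:       # plain ndarray: no .mask attribute
        where = dest == nodataval
    np.copyto(dest, data, where=where)

dest = np.zeros((1, 2, 2), dtype="uint8")          # zeros instead of empty + fill(None)
src = np.array([[[5, 0], [7, 9]]], dtype="uint8")  # unmasked read() result
paint(dest, src, nodataval=0)
print(dest[0])  # [[5 0] [7 9]] — fell back to the plain dest == nodataval mask
```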
gh_patches_debug_14851
rasdani/github-patches
git_diff
python-poetry__poetry-289
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CLI help for install is ambiguous <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). - **OS version and name**: Linux 4.4.0-17134-Microsoft #137-Microsoft Thu Jun 14 18:46:00 PST 2018 x86_64 x86_64 x86_64 GNU/Linux - **Poetry version**: Poetry 0.11.2 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: No such file needed ## Issue Running `` poetry install --help`` produces the following output: ```sh Usage: install [options] Options: --no-dev Do not install dev dependencies. --dry-run Outputs the operations but will not execute anything (implicitly enables --verbose). -E, --extras=EXTRAS Extra sets of dependencies to install. (multiple values allowed) --develop=DEVELOP Install given packages in development mode. (multiple values allowed) -h, --help Display this help message -q, --quiet Do not output any message -V, --version Display this application version --ansi Force ANSI output --no-ansi Disable ANSI output -n, --no-interaction Do not ask any interactive question -v|vv|vvv, --verbose[=VERBOSE] Increase the verbosity of messages: 1 for normal output, 2 for more verbose output and 3 for debug Help: The install command reads the pyproject.toml file from the current directory, processes it, and downloads and installs all the libraries and dependencies outlined in that file. If the file does not exist it will look for pyproject.toml and do the same. poetry install ``` Note that in the final paragraph, ``pyproject.toml`` is listed two times, and ``pyproject.lock`` is not mentioned. Based on the [online docs](https://poetry.eustace.io/docs/cli/#install), I believe the first occurrence of ``pyproject.toml`` should be ``pyproject.lock``. </issue> <code> [start of poetry/console/commands/install.py] 1 from .venv_command import VenvCommand 2 3 4 class InstallCommand(VenvCommand): 5 """ 6 Installs the project dependencies. 7 8 install 9 { --no-dev : Do not install dev dependencies. } 10 { --dry-run : Outputs the operations but will not execute anything 11 (implicitly enables --verbose). } 12 { --E|extras=* : Extra sets of dependencies to install. } 13 { --develop=* : Install given packages in development mode. } 14 """ 15 16 help = """The <info>install</info> command reads the <comment>pyproject.toml</> file from 17 the current directory, processes it, and downloads and installs all the 18 libraries and dependencies outlined in that file. If the file does not 19 exist it will look for <comment>pyproject.toml</> and do the same. 
20 21 <info>poetry install</info> 22 """ 23 24 _loggers = ["poetry.repositories.pypi_repository"] 25 26 def handle(self): 27 from poetry.installation import Installer 28 29 installer = Installer( 30 self.output, 31 self.venv, 32 self.poetry.package, 33 self.poetry.locker, 34 self.poetry.pool, 35 ) 36 37 extras = [] 38 for extra in self.option("extras"): 39 if " " in extra: 40 extras += [e.strip() for e in extra.split(" ")] 41 else: 42 extras.append(extra) 43 44 installer.extras(extras) 45 installer.dev_mode(not self.option("no-dev")) 46 installer.develop(self.option("develop")) 47 installer.dry_run(self.option("dry-run")) 48 installer.verbose(self.option("verbose")) 49 50 return installer.run() 51 [end of poetry/console/commands/install.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py --- a/poetry/console/commands/install.py +++ b/poetry/console/commands/install.py @@ -13,12 +13,12 @@ { --develop=* : Install given packages in development mode. } """ - help = """The <info>install</info> command reads the <comment>pyproject.toml</> file from + help = """The <info>install</info> command reads the <comment>pyproject.lock</> file from the current directory, processes it, and downloads and installs all the libraries and dependencies outlined in that file. If the file does not exist it will look for <comment>pyproject.toml</> and do the same. -<info>poetry install</info> +<info>poetry install</info> """ _loggers = ["poetry.repositories.pypi_repository"]
{"golden_diff": "diff --git a/poetry/console/commands/install.py b/poetry/console/commands/install.py\n--- a/poetry/console/commands/install.py\n+++ b/poetry/console/commands/install.py\n@@ -13,12 +13,12 @@\n { --develop=* : Install given packages in development mode. }\n \"\"\"\n \n- help = \"\"\"The <info>install</info> command reads the <comment>pyproject.toml</> file from\n+ help = \"\"\"The <info>install</info> command reads the <comment>pyproject.lock</> file from\n the current directory, processes it, and downloads and installs all the\n libraries and dependencies outlined in that file. If the file does not\n exist it will look for <comment>pyproject.toml</> and do the same.\n \n-<info>poetry install</info> \n+<info>poetry install</info>\n \"\"\"\n \n _loggers = [\"poetry.repositories.pypi_repository\"]\n", "issue": "CLI help for install is ambiguous\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Linux 4.4.0-17134-Microsoft #137-Microsoft Thu Jun 14 18:46:00 PST 2018 x86_64 x86_64 x86_64 GNU/Linux\r\n- **Poetry version**: Poetry 0.11.2\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: No such file needed\r\n\r\n## Issue\r\nRunning `` poetry install --help`` produces the following output:\r\n```sh\r\nUsage:\r\n install [options]\r\n\r\nOptions:\r\n --no-dev Do not install dev dependencies.\r\n --dry-run Outputs the operations but will not execute anything (implicitly enables --verbose).\r\n -E, --extras=EXTRAS Extra sets of dependencies to install. (multiple values allowed)\r\n --develop=DEVELOP Install given packages in development mode. (multiple values allowed)\r\n -h, --help Display this help message\r\n -q, --quiet Do not output any message\r\n -V, --version Display this application version\r\n --ansi Force ANSI output\r\n --no-ansi Disable ANSI output\r\n -n, --no-interaction Do not ask any interactive question\r\n -v|vv|vvv, --verbose[=VERBOSE] Increase the verbosity of messages: 1 for normal output, 2 for more verbose output and 3 for debug\r\n\r\nHelp:\r\n The install command reads the pyproject.toml file from\r\n the current directory, processes it, and downloads and installs all the\r\n libraries and dependencies outlined in that file. If the file does not\r\n exist it will look for pyproject.toml and do the same.\r\n\r\n poetry install\r\n```\r\nNote that in the final paragraph, ``pyproject.toml`` is listed two times, and ``pyproject.lock`` is not mentioned. Based on the [online docs](https://poetry.eustace.io/docs/cli/#install), I believe the first occurrence of ``pyproject.toml`` should be ``pyproject.lock``.\n", "before_files": [{"content": "from .venv_command import VenvCommand\n\n\nclass InstallCommand(VenvCommand):\n \"\"\"\n Installs the project dependencies.\n\n install\n { --no-dev : Do not install dev dependencies. }\n { --dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n { --E|extras=* : Extra sets of dependencies to install. }\n { --develop=* : Install given packages in development mode. 
}\n \"\"\"\n\n help = \"\"\"The <info>install</info> command reads the <comment>pyproject.toml</> file from\nthe current directory, processes it, and downloads and installs all the\nlibraries and dependencies outlined in that file. If the file does not\nexist it will look for <comment>pyproject.toml</> and do the same.\n\n<info>poetry install</info> \n\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from poetry.installation import Installer\n\n installer = Installer(\n self.output,\n self.venv,\n self.poetry.package,\n self.poetry.locker,\n self.poetry.pool,\n )\n\n extras = []\n for extra in self.option(\"extras\"):\n if \" \" in extra:\n extras += [e.strip() for e in extra.split(\" \")]\n else:\n extras.append(extra)\n\n installer.extras(extras)\n installer.dev_mode(not self.option(\"no-dev\"))\n installer.develop(self.option(\"develop\"))\n installer.dry_run(self.option(\"dry-run\"))\n installer.verbose(self.option(\"verbose\"))\n\n return installer.run()\n", "path": "poetry/console/commands/install.py"}]}
1,544
215
gh_patches_debug_19685
rasdani/github-patches
git_diff
quantumlib__Cirq-1345
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make ControlledGate work with gates that only provide a decomposition The following should work: ``` import cirq class G(cirq.TwoQubitGate): def _decompose_(self, qubits): a, b = qubits yield cirq.X(a)**0.5 yield cirq.H(b) yield cirq.CZ(a, b) cg = cirq.ControlledGate(G()) x, y, z = cirq.LineQubit.range(3) c = cirq.Circuit.from_ops(cg(x, y, z)) print(c.to_unitary_matrix()) ``` but currently it raises an exception: ``` Operation without a known matrix or decomposition: cirq.ControlledGate(sub_gate=[...G...].on(cirq.LineQubit(0), cirq.LineQubit(1), cirq.LineQubit(2)) ``` because `ControlledGate` doesn't have a `_decompose_` method. This issue is to add that method. </issue> <code> [start of cirq/ops/controlled_gate.py] 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Any, Union 16 17 import numpy as np 18 19 from cirq import linalg, protocols, value 20 from cirq.ops import raw_types, controlled_operation as cop 21 from cirq.type_workarounds import NotImplementedType 22 23 24 @value.value_equality 25 class ControlledGate(raw_types.Gate): 26 """Augments existing gates with a control qubit.""" 27 28 def __init__(self, sub_gate: raw_types.Gate) -> None: 29 """Initializes the controlled gate. 30 31 Args: 32 sub_gate: The gate to add a control qubit to. 33 """ 34 self.sub_gate = sub_gate 35 36 def num_qubits(self) -> int: 37 return self.sub_gate.num_qubits() + 1 38 39 def _decompose_(self, qubits): 40 result = protocols.decompose_once_with_qubits(self.sub_gate, 41 qubits[1:], 42 NotImplemented) 43 if result is NotImplemented: 44 return NotImplemented 45 46 return [cop.ControlledOperation(qubits[0], op) for op in result] 47 48 def validate_args(self, qubits) -> None: 49 if len(qubits) < 1: 50 raise ValueError('No control qubit specified.') 51 self.sub_gate.validate_args(qubits[1:]) 52 53 def _value_equality_values_(self): 54 return self.sub_gate 55 56 def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs) -> np.ndarray: 57 control = args.axes[0] 58 rest = args.axes[1:] 59 active = linalg.slice_for_qubits_equal_to([control], 1) 60 sub_axes = [r - int(r > control) for r in rest] 61 target_view = args.target_tensor[active] 62 buffer_view = args.available_buffer[active] 63 result = protocols.apply_unitary( 64 self.sub_gate, 65 protocols.ApplyUnitaryArgs( 66 target_view, 67 buffer_view, 68 sub_axes), 69 default=NotImplemented) 70 71 if result is NotImplemented: 72 return NotImplemented 73 74 if result is target_view: 75 return args.target_tensor 76 77 if result is buffer_view: 78 inactive = linalg.slice_for_qubits_equal_to([control], 0) 79 args.available_buffer[inactive] = args.target_tensor[inactive] 80 return args.available_buffer 81 82 # HACK: assume they didn't somehow escape the slice view and edit the 83 # rest of target_tensor. 
84 args.target_tensor[active] = result 85 return args.target_tensor 86 87 def _has_unitary_(self) -> bool: 88 return protocols.has_unitary(self.sub_gate) 89 90 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]: 91 sub_matrix = protocols.unitary(self.sub_gate, None) 92 if sub_matrix is None: 93 return NotImplemented 94 return linalg.block_diag(np.eye(sub_matrix.shape[0]), sub_matrix) 95 96 def __pow__(self, exponent: Any) -> 'ControlledGate': 97 new_sub_gate = protocols.pow(self.sub_gate, 98 exponent, 99 NotImplemented) 100 if new_sub_gate is NotImplemented: 101 return NotImplemented 102 return ControlledGate(new_sub_gate) 103 104 def _is_parameterized_(self): 105 return protocols.is_parameterized(self.sub_gate) 106 107 def _resolve_parameters_(self, param_resolver): 108 new_sub_gate = protocols.resolve_parameters(self.sub_gate, 109 param_resolver) 110 return ControlledGate(new_sub_gate) 111 112 def _trace_distance_bound_(self): 113 return protocols.trace_distance_bound(self.sub_gate) 114 115 def _circuit_diagram_info_(self, 116 args: protocols.CircuitDiagramInfoArgs 117 ) -> protocols.CircuitDiagramInfo: 118 sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None) 119 if sub_info is None: 120 return NotImplemented 121 return protocols.CircuitDiagramInfo( 122 wire_symbols=('@',) + sub_info.wire_symbols, 123 exponent=sub_info.exponent) 124 125 def __str__(self): 126 return 'C' + str(self.sub_gate) 127 128 def __repr__(self): 129 return 'cirq.ControlledGate(sub_gate={!r})'.format(self.sub_gate) 130 131 [end of cirq/ops/controlled_gate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cirq/ops/controlled_gate.py b/cirq/ops/controlled_gate.py --- a/cirq/ops/controlled_gate.py +++ b/cirq/ops/controlled_gate.py @@ -115,7 +115,18 @@ def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs ) -> protocols.CircuitDiagramInfo: - sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None) + sub_args = protocols.CircuitDiagramInfoArgs( + known_qubit_count=(args.known_qubit_count - 1 + if args.known_qubit_count is not None else None), + known_qubits=(args.known_qubits[1:] + if args.known_qubits is not None else None), + use_unicode_characters=args.use_unicode_characters, + precision=args.precision, + qubit_map=args.qubit_map + ) + sub_info = protocols.circuit_diagram_info(self.sub_gate, + sub_args, + None) if sub_info is None: return NotImplemented return protocols.CircuitDiagramInfo(
{"golden_diff": "diff --git a/cirq/ops/controlled_gate.py b/cirq/ops/controlled_gate.py\n--- a/cirq/ops/controlled_gate.py\n+++ b/cirq/ops/controlled_gate.py\n@@ -115,7 +115,18 @@\n def _circuit_diagram_info_(self,\n args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n- sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None)\n+ sub_args = protocols.CircuitDiagramInfoArgs(\n+ known_qubit_count=(args.known_qubit_count - 1\n+ if args.known_qubit_count is not None else None),\n+ known_qubits=(args.known_qubits[1:]\n+ if args.known_qubits is not None else None),\n+ use_unicode_characters=args.use_unicode_characters,\n+ precision=args.precision,\n+ qubit_map=args.qubit_map\n+ )\n+ sub_info = protocols.circuit_diagram_info(self.sub_gate,\n+ sub_args,\n+ None)\n if sub_info is None:\n return NotImplemented\n return protocols.CircuitDiagramInfo(\n", "issue": "Make ControlledGate work with gates that only provide a decomposition\nThe following should work:\r\n\r\n```\r\nimport cirq\r\n\r\n\r\nclass G(cirq.TwoQubitGate):\r\n def _decompose_(self, qubits):\r\n a, b = qubits\r\n yield cirq.X(a)**0.5\r\n yield cirq.H(b)\r\n yield cirq.CZ(a, b)\r\n\r\n\r\ncg = cirq.ControlledGate(G())\r\nx, y, z = cirq.LineQubit.range(3)\r\nc = cirq.Circuit.from_ops(cg(x, y, z))\r\nprint(c.to_unitary_matrix())\r\n```\r\n\r\nbut currently it raises an exception:\r\n\r\n```\r\nOperation without a known matrix or decomposition: cirq.ControlledGate(sub_gate=[...G...].on(cirq.LineQubit(0), cirq.LineQubit(1), cirq.LineQubit(2))\r\n```\r\n\r\nbecause `ControlledGate` doesn't have a `_decompose_` method. This issue is to add that method.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Union\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, value\nfrom cirq.ops import raw_types, controlled_operation as cop\nfrom cirq.type_workarounds import NotImplementedType\n\n\[email protected]_equality\nclass ControlledGate(raw_types.Gate):\n \"\"\"Augments existing gates with a control qubit.\"\"\"\n\n def __init__(self, sub_gate: raw_types.Gate) -> None:\n \"\"\"Initializes the controlled gate.\n\n Args:\n sub_gate: The gate to add a control qubit to.\n \"\"\"\n self.sub_gate = sub_gate\n\n def num_qubits(self) -> int:\n return self.sub_gate.num_qubits() + 1\n\n def _decompose_(self, qubits):\n result = protocols.decompose_once_with_qubits(self.sub_gate,\n qubits[1:],\n NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [cop.ControlledOperation(qubits[0], op) for op in result]\n\n def validate_args(self, qubits) -> None:\n if len(qubits) < 1:\n raise ValueError('No control qubit specified.')\n self.sub_gate.validate_args(qubits[1:])\n\n def _value_equality_values_(self):\n return self.sub_gate\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs) -> np.ndarray:\n control = args.axes[0]\n rest = args.axes[1:]\n active = 
linalg.slice_for_qubits_equal_to([control], 1)\n sub_axes = [r - int(r > control) for r in rest]\n target_view = args.target_tensor[active]\n buffer_view = args.available_buffer[active]\n result = protocols.apply_unitary(\n self.sub_gate,\n protocols.ApplyUnitaryArgs(\n target_view,\n buffer_view,\n sub_axes),\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is target_view:\n return args.target_tensor\n\n if result is buffer_view:\n inactive = linalg.slice_for_qubits_equal_to([control], 0)\n args.available_buffer[inactive] = args.target_tensor[inactive]\n return args.available_buffer\n\n # HACK: assume they didn't somehow escape the slice view and edit the\n # rest of target_tensor.\n args.target_tensor[active] = result\n return args.target_tensor\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_gate)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_gate, None)\n if sub_matrix is None:\n return NotImplemented\n return linalg.block_diag(np.eye(sub_matrix.shape[0]), sub_matrix)\n\n def __pow__(self, exponent: Any) -> 'ControlledGate':\n new_sub_gate = protocols.pow(self.sub_gate,\n exponent,\n NotImplemented)\n if new_sub_gate is NotImplemented:\n return NotImplemented\n return ControlledGate(new_sub_gate)\n\n def _is_parameterized_(self):\n return protocols.is_parameterized(self.sub_gate)\n\n def _resolve_parameters_(self, param_resolver):\n new_sub_gate = protocols.resolve_parameters(self.sub_gate,\n param_resolver)\n return ControlledGate(new_sub_gate)\n\n def _trace_distance_bound_(self):\n return protocols.trace_distance_bound(self.sub_gate)\n\n def _circuit_diagram_info_(self,\n args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None)\n if sub_info is None:\n return NotImplemented\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@',) + sub_info.wire_symbols,\n exponent=sub_info.exponent)\n\n def __str__(self):\n return 'C' + str(self.sub_gate)\n\n def __repr__(self):\n return 'cirq.ControlledGate(sub_gate={!r})'.format(self.sub_gate)\n\n", "path": "cirq/ops/controlled_gate.py"}]}
2042
253
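A note on the cirq record above: the golden diff narrows the diagram arguments before delegating to the sub-gate, because a controlled gate spans one control qubit plus the sub-gate's own qubits. A minimal, cirq-free sketch of that narrowing (the function name is illustrative, not from the patch):

```
def narrow(known_qubit_count, known_qubits):
    # Drop the control: the sub-gate draws on one fewer qubit, starting
    # after the control wire; None means "unknown" and stays None.
    count = known_qubit_count - 1 if known_qubit_count is not None else None
    qubits = known_qubits[1:] if known_qubits is not None else None
    return count, qubits

print(narrow(3, ('q0', 'q1', 'q2')))  # (2, ('q1', 'q2'))
print(narrow(None, None))             # (None, None)
```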
gh_patches_debug_133
rasdani/github-patches
git_diff
holoviz__panel-752
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> panel/examples/apps/django2/ example doesn't work The django2 example doesn't work at least for Django 2.2. The interactive plot doesn't show up and there are no clear error messages either. However, the same example provided by ParamBokeh works fine. But I prefer Panel if this problem can be solved. </issue> <code> [start of examples/apps/django2/sliders/bk_sliders.py] 1 import panel as pn 2 3 from .sinewave import SineWave 4 5 def app(doc): 6 sw = SineWave() 7 row = pn.Row(sw.param, sw.plot) 8 row._get_root(doc) 9 [end of examples/apps/django2/sliders/bk_sliders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/apps/django2/sliders/bk_sliders.py b/examples/apps/django2/sliders/bk_sliders.py --- a/examples/apps/django2/sliders/bk_sliders.py +++ b/examples/apps/django2/sliders/bk_sliders.py @@ -5,4 +5,4 @@ def app(doc): sw = SineWave() row = pn.Row(sw.param, sw.plot) - row._get_root(doc) + row.server_doc(doc)
{"golden_diff": "diff --git a/examples/apps/django2/sliders/bk_sliders.py b/examples/apps/django2/sliders/bk_sliders.py\n--- a/examples/apps/django2/sliders/bk_sliders.py\n+++ b/examples/apps/django2/sliders/bk_sliders.py\n@@ -5,4 +5,4 @@\n def app(doc):\n sw = SineWave()\n row = pn.Row(sw.param, sw.plot)\n- row._get_root(doc)\n+ row.server_doc(doc)\n", "issue": "panel/examples/apps/django2/ example doesn't work\nThe django2 example doesn't work at least for Django 2.2. The interactive plot doesn't show up and there are no clear error messages either. However, the same example provided by ParamBokeh works fine. But I prefer Panel if this problem can be solved. \n", "before_files": [{"content": "import panel as pn\n\nfrom .sinewave import SineWave\n\ndef app(doc):\n sw = SineWave()\n row = pn.Row(sw.param, sw.plot)\n row._get_root(doc)\n", "path": "examples/apps/django2/sliders/bk_sliders.py"}]}
674
108
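For the Panel record above, the whole fix is swapping a private call for the public embedding API. A minimal sketch of the corrected Django view hook, assuming the example app's `SineWave` class:

```
import panel as pn

from .sinewave import SineWave

def app(doc):
    sw = SineWave()
    row = pn.Row(sw.param, sw.plot)
    # server_doc attaches the layout's Bokeh models to the served Document;
    # the private _get_root only built a model tree without registering it.
    row.server_doc(doc)
```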
gh_patches_debug_11398
rasdani/github-patches
git_diff
dotkom__onlineweb4-741
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ability to format company-url ![screenshot from 2014-01-29 17 32 37](https://f.cloud.github.com/assets/1758308/2031524/225447e4-8903-11e3-9de0-90237ad655ba.png) Like in this case, this ugly url should be formated to vimsa.no or the like. </issue> <code> [start of apps/companyprofile/models.py] 1 from django.db import models 2 from django.utils.translation import ugettext_lazy as _ 3 from filebrowser.fields import FileBrowseField 4 5 class Company(models.Model): 6 7 IMAGE_FOLDER = "images/companies" 8 IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff'] 9 10 11 name = models.CharField(_(u"bedriftsnavn"), max_length=100) 12 short_description = models.TextField(_(u"kort beskrivelse"), max_length=200) 13 long_description = models.TextField(_(u"utdypende beskrivelse"), blank=True, null=True) 14 image = FileBrowseField(_(u"bilde"), 15 max_length=200, directory=IMAGE_FOLDER, 16 extensions=IMAGE_EXTENSIONS, null=False, blank=False) 17 site = models.URLField(_(u"hjemmeside")) 18 email_address = models.EmailField(_(u"epostaddresse"), max_length=75, blank=True, null=True) 19 phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True) 20 21 def __unicode__(self): 22 return self.name 23 24 class Meta: 25 verbose_name = _(u"Bedrift") 26 verbose_name_plural = _(u"Bedrifter") 27 [end of apps/companyprofile/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/companyprofile/models.py b/apps/companyprofile/models.py --- a/apps/companyprofile/models.py +++ b/apps/companyprofile/models.py @@ -14,7 +14,7 @@ image = FileBrowseField(_(u"bilde"), max_length=200, directory=IMAGE_FOLDER, extensions=IMAGE_EXTENSIONS, null=False, blank=False) - site = models.URLField(_(u"hjemmeside")) + site = models.CharField(_(u"hjemmeside"), max_length=100) email_address = models.EmailField(_(u"epostaddresse"), max_length=75, blank=True, null=True) phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True)
{"golden_diff": "diff --git a/apps/companyprofile/models.py b/apps/companyprofile/models.py\n--- a/apps/companyprofile/models.py\n+++ b/apps/companyprofile/models.py\n@@ -14,7 +14,7 @@\n image = FileBrowseField(_(u\"bilde\"), \n max_length=200, directory=IMAGE_FOLDER,\n extensions=IMAGE_EXTENSIONS, null=False, blank=False)\n- site = models.URLField(_(u\"hjemmeside\"))\n+ site = models.CharField(_(u\"hjemmeside\"), max_length=100)\n email_address = models.EmailField(_(u\"epostaddresse\"), max_length=75, blank=True, null=True)\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n", "issue": "Ability to format company-url\n![screenshot from 2014-01-29 17 32 37](https://f.cloud.github.com/assets/1758308/2031524/225447e4-8903-11e3-9de0-90237ad655ba.png)\nLike in this case, this ugly url should be formated to vimsa.no or the like. \n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom filebrowser.fields import FileBrowseField\n\nclass Company(models.Model):\n\n IMAGE_FOLDER = \"images/companies\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff']\n\n\n name = models.CharField(_(u\"bedriftsnavn\"), max_length=100)\n short_description = models.TextField(_(u\"kort beskrivelse\"), max_length=200)\n long_description = models.TextField(_(u\"utdypende beskrivelse\"), blank=True, null=True)\n image = FileBrowseField(_(u\"bilde\"), \n max_length=200, directory=IMAGE_FOLDER,\n extensions=IMAGE_EXTENSIONS, null=False, blank=False)\n site = models.URLField(_(u\"hjemmeside\"))\n email_address = models.EmailField(_(u\"epostaddresse\"), max_length=75, blank=True, null=True)\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n verbose_name = _(u\"Bedrift\")\n verbose_name_plural = _(u\"Bedrifter\")\n", "path": "apps/companyprofile/models.py"}]}
976
174
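The onlineweb4 record above fixes the display problem at the model level: a plain CharField stores whatever short form the admin enters, where URLField insists on a full scheme-qualified URL. A minimal sketch of the changed field (other model fields omitted):

```
from django.db import models
from django.utils.translation import ugettext_lazy as _

class Company(models.Model):
    # CharField skips URLField's validation, so a bare hostname such as
    # "vimsa.no" can be stored and rendered as-is.
    site = models.CharField(_(u"hjemmeside"), max_length=100)
```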
gh_patches_debug_2803
rasdani/github-patches
git_diff
sopel-irc__sopel-1261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Results from currency conversion should be rounded to 2 decimals At the moment, the results of a currency conversion query is reported as a float: < mynick> .cur 100 SEK to USD < mybot> 100.0 SEK (Swedish krona) = 12.202017114914426 USD (US dollar) As far as I know, no currency reports more than 2 decimals, and some even less (JPY comes to mind). Rounding the result to 2 decimals will result in more readable output. < mynick> .cur 100 SEK to USD < mybot> 100.0 SEK (Swedish krona) = 12.20 USD (US dollar) </issue> <code> [start of sopel/modules/currency.py] 1 # coding=utf-8 2 # Copyright 2013 Elsie Powell, embolalia.com 3 # Licensed under the Eiffel Forum License 2 4 from __future__ import unicode_literals, absolute_import, print_function, division 5 6 import re 7 8 from requests import get 9 from sopel.module import commands, example, NOLIMIT 10 11 # The Canadian central bank has better exchange rate data than the Fed, the 12 # Bank of England, or the European Central Bank. Who knew? 13 base_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml' 14 regex = re.compile(r''' 15 (\d+(?:\.\d+)?) # Decimal number 16 \s*([a-zA-Z]{3}) # 3-letter currency code 17 \s+(?:in|as|of|to)\s+ # preposition 18 ([a-zA-Z]{3}) # 3-letter currency code 19 ''', re.VERBOSE) 20 21 22 def get_rate(code): 23 code = code.upper() 24 if code == 'CAD': 25 return 1, 'Canadian Dollar' 26 elif code == 'BTC': 27 btc_rate = get('https://apiv2.bitcoinaverage.com/indices/global/ticker/BTCCAD') 28 rates = btc_rate.json() 29 return 1 / rates['averages']['day'], 'Bitcoin—24hr average' 30 31 data = get("http://www.bankofcanada.ca/valet/observations/FX{}CAD/json".format(code)) 32 name = data.json()['seriesDetail']['FX{}CAD'.format(code)]['description'] 33 name = name.split(" to Canadian")[0] 34 json = data.json()['observations'] 35 for element in reversed(json): 36 if 'v' in element['FX{}CAD'.format(code)]: 37 return 1 / float(element['FX{}CAD'.format(code)]['v']), name 38 39 40 @commands('cur', 'currency', 'exchange') 41 @example('.cur 20 EUR in USD') 42 def exchange(bot, trigger): 43 """Show the exchange rate between two currencies""" 44 if not trigger.group(2): 45 return bot.reply("No search term. An example: .cur 20 EUR in USD") 46 match = regex.match(trigger.group(2)) 47 if not match: 48 # It's apologetic, because it's using Canadian data. 49 bot.reply("Sorry, I didn't understand the input.") 50 return NOLIMIT 51 52 amount, of, to = match.groups() 53 try: 54 amount = float(amount) 55 except: 56 bot.reply("Sorry, I didn't understand the input.") 57 display(bot, amount, of, to) 58 59 60 def display(bot, amount, of, to): 61 if not amount: 62 bot.reply("Zero is zero, no matter what country you're in.") 63 try: 64 of_rate, of_name = get_rate(of) 65 if not of_name: 66 bot.reply("Unknown currency: %s" % of) 67 return 68 to_rate, to_name = get_rate(to) 69 if not to_name: 70 bot.reply("Unknown currency: %s" % to) 71 return 72 except Exception: 73 bot.reply("Something went wrong while I was getting the exchange rate.") 74 return NOLIMIT 75 76 result = amount / of_rate * to_rate 77 bot.say("{} {} ({}) = {} {} ({})".format(amount, of.upper(), of_name, 78 result, to.upper(), to_name)) 79 80 81 @commands('btc', 'bitcoin') 82 @example('.btc 20 EUR') 83 def bitcoin(bot, trigger): 84 #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency. 
85 to = trigger.group(4) 86 amount = trigger.group(3) 87 if not to: 88 to = trigger.group(3) or 'USD' 89 amount = 1 90 91 try: 92 amount = float(amount) 93 except: 94 bot.reply("Sorry, I didn't understand the input.") 95 return NOLIMIT 96 97 display(bot, amount, 'BTC', to) 98 [end of sopel/modules/currency.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py --- a/sopel/modules/currency.py +++ b/sopel/modules/currency.py @@ -74,7 +74,7 @@ return NOLIMIT result = amount / of_rate * to_rate - bot.say("{} {} ({}) = {} {} ({})".format(amount, of.upper(), of_name, + bot.say("{:.2f} {} ({}) = {:.2f} {} ({})".format(amount, of.upper(), of_name, result, to.upper(), to_name))
{"golden_diff": "diff --git a/sopel/modules/currency.py b/sopel/modules/currency.py\n--- a/sopel/modules/currency.py\n+++ b/sopel/modules/currency.py\n@@ -74,7 +74,7 @@\n return NOLIMIT\n \n result = amount / of_rate * to_rate\n- bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n+ bot.say(\"{:.2f} {} ({}) = {:.2f} {} ({})\".format(amount, of.upper(), of_name,\n result, to.upper(), to_name))\n", "issue": "Results from currency conversion should be rounded to 2 decimals\nAt the moment, the results of a currency conversion query is reported as a float:\r\n\r\n < mynick> .cur 100 SEK to USD\r\n < mybot> 100.0 SEK (Swedish krona) = 12.202017114914426 USD (US dollar)\r\n\r\nAs far as I know, no currency reports more than 2 decimals, and some even less (JPY comes to mind). Rounding the result to 2 decimals will result in more readable output.\r\n\r\n < mynick> .cur 100 SEK to USD\r\n < mybot> 100.0 SEK (Swedish krona) = 12.20 USD (US dollar)\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell, embolalia.com\n# Licensed under the Eiffel Forum License 2\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nfrom requests import get\nfrom sopel.module import commands, example, NOLIMIT\n\n# The Canadian central bank has better exchange rate data than the Fed, the\n# Bank of England, or the European Central Bank. Who knew?\nbase_url = 'http://www.bankofcanada.ca/stats/assets/rates_rss/noon/en_{}.xml'\nregex = re.compile(r'''\n (\\d+(?:\\.\\d+)?) # Decimal number\n \\s*([a-zA-Z]{3}) # 3-letter currency code\n \\s+(?:in|as|of|to)\\s+ # preposition\n ([a-zA-Z]{3}) # 3-letter currency code\n ''', re.VERBOSE)\n\n\ndef get_rate(code):\n code = code.upper()\n if code == 'CAD':\n return 1, 'Canadian Dollar'\n elif code == 'BTC':\n btc_rate = get('https://apiv2.bitcoinaverage.com/indices/global/ticker/BTCCAD')\n rates = btc_rate.json()\n return 1 / rates['averages']['day'], 'Bitcoin\u201424hr average'\n\n data = get(\"http://www.bankofcanada.ca/valet/observations/FX{}CAD/json\".format(code))\n name = data.json()['seriesDetail']['FX{}CAD'.format(code)]['description']\n name = name.split(\" to Canadian\")[0]\n json = data.json()['observations']\n for element in reversed(json):\n if 'v' in element['FX{}CAD'.format(code)]:\n return 1 / float(element['FX{}CAD'.format(code)]['v']), name\n\n\n@commands('cur', 'currency', 'exchange')\n@example('.cur 20 EUR in USD')\ndef exchange(bot, trigger):\n \"\"\"Show the exchange rate between two currencies\"\"\"\n if not trigger.group(2):\n return bot.reply(\"No search term. 
An example: .cur 20 EUR in USD\")\n match = regex.match(trigger.group(2))\n if not match:\n # It's apologetic, because it's using Canadian data.\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n amount, of, to = match.groups()\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n display(bot, amount, of, to)\n\n\ndef display(bot, amount, of, to):\n if not amount:\n bot.reply(\"Zero is zero, no matter what country you're in.\")\n try:\n of_rate, of_name = get_rate(of)\n if not of_name:\n bot.reply(\"Unknown currency: %s\" % of)\n return\n to_rate, to_name = get_rate(to)\n if not to_name:\n bot.reply(\"Unknown currency: %s\" % to)\n return\n except Exception:\n bot.reply(\"Something went wrong while I was getting the exchange rate.\")\n return NOLIMIT\n\n result = amount / of_rate * to_rate\n bot.say(\"{} {} ({}) = {} {} ({})\".format(amount, of.upper(), of_name,\n result, to.upper(), to_name))\n\n\n@commands('btc', 'bitcoin')\n@example('.btc 20 EUR')\ndef bitcoin(bot, trigger):\n #if 2 args, 1st is number and 2nd is currency. If 1 arg, it's either the number or the currency.\n to = trigger.group(4)\n amount = trigger.group(3)\n if not to:\n to = trigger.group(3) or 'USD'\n amount = 1\n\n try:\n amount = float(amount)\n except:\n bot.reply(\"Sorry, I didn't understand the input.\")\n return NOLIMIT\n\n display(bot, amount, 'BTC', to)\n", "path": "sopel/modules/currency.py"}]}
1788
132
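The sopel fix above is pure `str.format` precision. A quick self-contained check with the numbers from the issue:

```
amount, result = 100.0, 12.202017114914426
print("{:.2f} {} ({}) = {:.2f} {} ({})".format(
    amount, "SEK", "Swedish krona", result, "USD", "US dollar"))
# 100.00 SEK (Swedish krona) = 12.20 USD (US dollar)
```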
gh_patches_debug_445
rasdani/github-patches
git_diff
docker__docker-py-1156
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Requests v2.11.0 causes breakage This is a known issue within requests and looks like it will be fixed in v2.11.1. Documenting for the benefit of anyone else who runs into this :). After a recent pip upgrade I found that docker-py was passing along an error from requests: File "/usr/local/lib/python2.7/dist-packages/docker/client.py", line 307, in _stream_raw_result for out in response.iter_content(chunk_size=1, decode_unicode=True): File "/usr/local/lib/python2.7/dist-packages/requests/utils.py", line 372, in stream_decode_response_unicode raise UnicodeError("Unable to decode contents with encoding %s." % encoding) UnicodeError: Unable to decode contents with encoding None. This has already been reported to requests (https://github.com/kennethreitz/requests/issues/3481) and fixed (https://github.com/kennethreitz/requests/commit/d7f56ba9383575a6b7d361db0123a93c70a2b42f) for the next version. If you are running into this issue, the easiest fix for now appears to be reverting to a pre 2.11 version of requests. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 from setuptools import setup 6 7 8 ROOT_DIR = os.path.dirname(__file__) 9 SOURCE_DIR = os.path.join(ROOT_DIR) 10 11 requirements = [ 12 'requests >= 2.5.2', 13 'six >= 1.4.0', 14 'websocket-client >= 0.32.0', 15 ] 16 17 if sys.platform == 'win32': 18 requirements.append('pypiwin32 >= 219') 19 20 extras_require = { 21 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5', 22 ':python_version < "3.3"': 'ipaddress >= 1.0.16', 23 } 24 25 version = None 26 exec(open('docker/version.py').read()) 27 28 with open('./test-requirements.txt') as test_reqs_txt: 29 test_requirements = [line for line in test_reqs_txt] 30 31 32 setup( 33 name="docker-py", 34 version=version, 35 description="Python client for Docker.", 36 url='https://github.com/docker/docker-py/', 37 packages=[ 38 'docker', 'docker.api', 'docker.auth', 'docker.transport', 39 'docker.utils', 'docker.utils.ports', 'docker.ssladapter' 40 ], 41 install_requires=requirements, 42 tests_require=test_requirements, 43 extras_require=extras_require, 44 zip_safe=False, 45 test_suite='tests', 46 classifiers=[ 47 'Development Status :: 4 - Beta', 48 'Environment :: Other Environment', 49 'Intended Audience :: Developers', 50 'Operating System :: OS Independent', 51 'Programming Language :: Python', 52 'Programming Language :: Python :: 2', 53 'Programming Language :: Python :: 2.6', 54 'Programming Language :: Python :: 2.7', 55 'Programming Language :: Python :: 3', 56 'Programming Language :: Python :: 3.3', 57 'Programming Language :: Python :: 3.4', 58 'Programming Language :: Python :: 3.5', 59 'Topic :: Utilities', 60 'License :: OSI Approved :: Apache Software License', 61 ], 62 ) 63 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ SOURCE_DIR = os.path.join(ROOT_DIR) requirements = [ - 'requests >= 2.5.2', + 'requests >= 2.5.2, < 2.11', 'six >= 1.4.0', 'websocket-client >= 0.32.0', ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n SOURCE_DIR = os.path.join(ROOT_DIR)\n \n requirements = [\n- 'requests >= 2.5.2',\n+ 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n ]\n", "issue": "Requests v2.11.0 causes breakage\nThis is a known issue within requests and looks like it will be fixed in v2.11.1. Documenting for the benefit of anyone else who runs into this :).\n\nAfter a recent pip upgrade I found that docker-py was passing along an error from requests:\n\nFile \"/usr/local/lib/python2.7/dist-packages/docker/client.py\", line 307, in _stream_raw_result\n for out in response.iter_content(chunk_size=1, decode_unicode=True):\n File \"/usr/local/lib/python2.7/dist-packages/requests/utils.py\", line 372, in stream_decode_response_unicode\n raise UnicodeError(\"Unable to decode contents with encoding %s.\" % encoding)\nUnicodeError: Unable to decode contents with encoding None.\n\nThis has already been reported to requests (https://github.com/kennethreitz/requests/issues/3481) and fixed (https://github.com/kennethreitz/requests/commit/d7f56ba9383575a6b7d361db0123a93c70a2b42f) for the next version.\n\nIf you are running into this issue, the easiest fix for now appears to be reverting to a pre 2.11 version of requests.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter'\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]}
1375
100
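The docker-py change above is a defensive upper bound rather than a code fix. A sketch of the pinned requirement list with the rationale inline:

```
requirements = [
    # requests 2.11.0 raises UnicodeError from stream_decode_response_unicode
    # when a streamed response has encoding None; stay below 2.11 until the
    # upstream fix ships in 2.11.1.
    'requests >= 2.5.2, < 2.11',
    'six >= 1.4.0',
    'websocket-client >= 0.32.0',
]
```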
gh_patches_debug_39038
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider kum_and_go is broken During the global build at 2021-08-25-14-42-15, spider **kum_and_go** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/kum_and_go.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/kum_and_go.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/kum_and_go.geojson)) </issue> <code> [start of locations/spiders/kum_and_go.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 class KumAndGoSpider(scrapy.Spider): 10 name = "kum_and_go" 11 item_attributes = {'brand': "Kum & Go", 'brand_wikidata': "Q6443340"} 12 allowed_domains = ["kumandgo.com"] 13 14 def start_requests(self): 15 yield scrapy.FormRequest( 16 'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php', 17 method='POST', 18 formdata={ 19 'coords[latitude]': '39.74581290359507', 20 'coords[longitude]': '-104.96756559990148', 21 'radius': '3000', 22 'action': 'stores_coords' 23 }) 24 25 def parse(self, response): 26 result = json.loads(response.body_as_unicode()) 27 for store in result['data']['stores']: 28 opening_hours = OpeningHours() 29 30 for hours_key in [ 31 'monday_hours', 32 'tuesday_hours', 33 'wednesday_hours', 34 'thursday_hours', 35 'friday_hours', 36 'saturday_hours', 37 'sunday_hours' 38 ]: 39 (open_time, close_time) = store[hours_key].split(' - ') 40 41 opening_hours.add_range(day=hours_key[:2].capitalize(), 42 open_time=open_time, 43 close_time=close_time, 44 time_format='%I:%M %p') 45 46 yield GeojsonPointItem( 47 ref=store['store_id'], 48 lon=store['longitude'], 49 lat=store['latitude'], 50 name=store['name'], 51 addr_full=store['address1'], 52 city=store['city'], 53 state=store['state'], 54 postcode=store['postalcode'], 55 country=store['country'], 56 phone=store['phone'], 57 website=store['url'], 58 opening_hours=opening_hours.as_opening_hours(), 59 extras={ 60 'amenity:fuel': True, 61 'atm': int(store['atm'] or 0) == 1, 62 'car_wash': int(store['car_wash'] or 0) == 1, 63 'fuel:diesel': int(store['diesel'] or 0) == 1, 64 'fuel:e85': int(store['e85'] or 0) == 1, 65 'hgv': int(store['semi_truck_fuel_island'] or 0) == 1, 66 } 67 ) 68 [end of locations/spiders/kum_and_go.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/kum_and_go.py b/locations/spiders/kum_and_go.py --- a/locations/spiders/kum_and_go.py +++ b/locations/spiders/kum_and_go.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import csv import scrapy import json @@ -12,56 +13,27 @@ allowed_domains = ["kumandgo.com"] def start_requests(self): - yield scrapy.FormRequest( - 'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php', - method='POST', - formdata={ - 'coords[latitude]': '39.74581290359507', - 'coords[longitude]': '-104.96756559990148', - 'radius': '3000', - 'action': 'stores_coords' - }) + with open('./locations/searchable_points/us_centroids_100mile_radius_state.csv') as points: + reader = csv.DictReader(points) + for point in reader: + if point['state'] in ('IA', 'AR', 'CO', 'MN', 'MO', 'MT', 'NE', 'ND', 'OK', 'SD', 'WY'): + yield scrapy.Request( + f'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php?action=store_search&lat={point["latitude"]}&lng={point["longitude"]}&max_results=100&search_radius=100', + ) def parse(self, response): - result = json.loads(response.body_as_unicode()) - for store in result['data']['stores']: - opening_hours = OpeningHours() - - for hours_key in [ - 'monday_hours', - 'tuesday_hours', - 'wednesday_hours', - 'thursday_hours', - 'friday_hours', - 'saturday_hours', - 'sunday_hours' - ]: - (open_time, close_time) = store[hours_key].split(' - ') - - opening_hours.add_range(day=hours_key[:2].capitalize(), - open_time=open_time, - close_time=close_time, - time_format='%I:%M %p') + result = json.loads(response.text) + for store in result: yield GeojsonPointItem( - ref=store['store_id'], - lon=store['longitude'], - lat=store['latitude'], - name=store['name'], - addr_full=store['address1'], + ref=store['id'], + lon=store['lng'], + lat=store['lat'], + addr_full=store['address'], city=store['city'], state=store['state'], - postcode=store['postalcode'], + postcode=store['zip'], country=store['country'], phone=store['phone'], - website=store['url'], - opening_hours=opening_hours.as_opening_hours(), - extras={ - 'amenity:fuel': True, - 'atm': int(store['atm'] or 0) == 1, - 'car_wash': int(store['car_wash'] or 0) == 1, - 'fuel:diesel': int(store['diesel'] or 0) == 1, - 'fuel:e85': int(store['e85'] or 0) == 1, - 'hgv': int(store['semi_truck_fuel_island'] or 0) == 1, - } + website=store['permalink'], )
{"golden_diff": "diff --git a/locations/spiders/kum_and_go.py b/locations/spiders/kum_and_go.py\n--- a/locations/spiders/kum_and_go.py\n+++ b/locations/spiders/kum_and_go.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n+import csv\n import scrapy\n import json\n \n@@ -12,56 +13,27 @@\n allowed_domains = [\"kumandgo.com\"]\n \n def start_requests(self):\n- yield scrapy.FormRequest(\n- 'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php',\n- method='POST',\n- formdata={\n- 'coords[latitude]': '39.74581290359507',\n- 'coords[longitude]': '-104.96756559990148',\n- 'radius': '3000',\n- 'action': 'stores_coords'\n- })\n+ with open('./locations/searchable_points/us_centroids_100mile_radius_state.csv') as points:\n+ reader = csv.DictReader(points)\n+ for point in reader:\n+ if point['state'] in ('IA', 'AR', 'CO', 'MN', 'MO', 'MT', 'NE', 'ND', 'OK', 'SD', 'WY'):\n+ yield scrapy.Request(\n+ f'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php?action=store_search&lat={point[\"latitude\"]}&lng={point[\"longitude\"]}&max_results=100&search_radius=100',\n+ )\n \n def parse(self, response):\n- result = json.loads(response.body_as_unicode())\n- for store in result['data']['stores']:\n- opening_hours = OpeningHours()\n-\n- for hours_key in [\n- 'monday_hours',\n- 'tuesday_hours',\n- 'wednesday_hours',\n- 'thursday_hours',\n- 'friday_hours',\n- 'saturday_hours',\n- 'sunday_hours'\n- ]:\n- (open_time, close_time) = store[hours_key].split(' - ')\n-\n- opening_hours.add_range(day=hours_key[:2].capitalize(),\n- open_time=open_time,\n- close_time=close_time,\n- time_format='%I:%M %p')\n+ result = json.loads(response.text)\n+ for store in result:\n \n yield GeojsonPointItem(\n- ref=store['store_id'],\n- lon=store['longitude'],\n- lat=store['latitude'],\n- name=store['name'],\n- addr_full=store['address1'],\n+ ref=store['id'],\n+ lon=store['lng'],\n+ lat=store['lat'],\n+ addr_full=store['address'],\n city=store['city'],\n state=store['state'],\n- postcode=store['postalcode'],\n+ postcode=store['zip'],\n country=store['country'],\n phone=store['phone'],\n- website=store['url'],\n- opening_hours=opening_hours.as_opening_hours(),\n- extras={\n- 'amenity:fuel': True,\n- 'atm': int(store['atm'] or 0) == 1,\n- 'car_wash': int(store['car_wash'] or 0) == 1,\n- 'fuel:diesel': int(store['diesel'] or 0) == 1,\n- 'fuel:e85': int(store['e85'] or 0) == 1,\n- 'hgv': int(store['semi_truck_fuel_island'] or 0) == 1,\n- }\n+ website=store['permalink'],\n )\n", "issue": "Spider kum_and_go is broken\nDuring the global build at 2021-08-25-14-42-15, spider **kum_and_go** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/kum_and_go.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/kum_and_go.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/kum_and_go.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass KumAndGoSpider(scrapy.Spider):\n name = \"kum_and_go\"\n item_attributes = {'brand': \"Kum & Go\", 'brand_wikidata': \"Q6443340\"}\n allowed_domains = [\"kumandgo.com\"]\n\n def start_requests(self):\n yield scrapy.FormRequest(\n 'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php',\n method='POST',\n formdata={\n 'coords[latitude]': '39.74581290359507',\n 'coords[longitude]': 
'-104.96756559990148',\n 'radius': '3000',\n 'action': 'stores_coords'\n })\n\n def parse(self, response):\n result = json.loads(response.body_as_unicode())\n for store in result['data']['stores']:\n opening_hours = OpeningHours()\n\n for hours_key in [\n 'monday_hours',\n 'tuesday_hours',\n 'wednesday_hours',\n 'thursday_hours',\n 'friday_hours',\n 'saturday_hours',\n 'sunday_hours'\n ]:\n (open_time, close_time) = store[hours_key].split(' - ')\n\n opening_hours.add_range(day=hours_key[:2].capitalize(),\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M %p')\n\n yield GeojsonPointItem(\n ref=store['store_id'],\n lon=store['longitude'],\n lat=store['latitude'],\n name=store['name'],\n addr_full=store['address1'],\n city=store['city'],\n state=store['state'],\n postcode=store['postalcode'],\n country=store['country'],\n phone=store['phone'],\n website=store['url'],\n opening_hours=opening_hours.as_opening_hours(),\n extras={\n 'amenity:fuel': True,\n 'atm': int(store['atm'] or 0) == 1,\n 'car_wash': int(store['car_wash'] or 0) == 1,\n 'fuel:diesel': int(store['diesel'] or 0) == 1,\n 'fuel:e85': int(store['e85'] or 0) == 1,\n 'hgv': int(store['semi_truck_fuel_island'] or 0) == 1,\n }\n )\n", "path": "locations/spiders/kum_and_go.py"}]}
1428
828
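The alltheplaces rewrite above abandons the dead `stores_coords` POST and fans a `store_search` GET out over state centroid points. A condensed sketch of the new request generation (endpoint, CSV path, and state list as in the patch; the centroid CSV is assumed to ship with the repository):

```
import csv

import scrapy

class KumAndGoSpider(scrapy.Spider):
    name = "kum_and_go"

    def start_requests(self):
        path = './locations/searchable_points/us_centroids_100mile_radius_state.csv'
        with open(path) as points:
            for point in csv.DictReader(points):
                # Only query states where the chain actually operates.
                if point['state'] in ('IA', 'AR', 'CO', 'MN', 'MO', 'MT',
                                      'NE', 'ND', 'OK', 'SD', 'WY'):
                    yield scrapy.Request(
                        'https://www.kumandgo.com/wordpress/wp-admin/admin-ajax.php'
                        f'?action=store_search&lat={point["latitude"]}'
                        f'&lng={point["longitude"]}'
                        '&max_results=100&search_radius=100')
```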
gh_patches_debug_27197
rasdani/github-patches
git_diff
ansible__ansible-modules-core-3886
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> junos_facts - config parameter should have a default value ##### COMPONENT NAME module : junos_facts ``` network/junos_facts.py ``` ##### ANSIBLE VERSION 2.1.0.0 ##### OS / ENVIRONMENT Mac OS 10.11.5 ##### SUMMARY Currently the parameter `config` for junos_facts is mandatory and do not have a default value I think it would be better to set a default value to `false` and not make this parameter mandatory. Playbooks will be simplified I'm happy to send a pull request for it, but I wanted to discuss this change before </issue> <code> [start of network/junos/junos_facts.py] 1 #!/usr/bin/python 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 # 18 19 DOCUMENTATION = """ 20 --- 21 module: junos_facts 22 version_added: "2.1" 23 author: "Peter Sprygada (@privateip)" 24 short_description: Collect facts from remote device running Junos 25 description: 26 - Collects fact information from a remote device running the Junos 27 operating system. By default, the module will collect basic fact 28 information from the device to be included with the hostvars. 29 Additional fact information can be collected based on the 30 configured set of arguments. 31 extends_documentation_fragment: junos 32 options: 33 config: 34 description: 35 - The C(config) argument instructs the fact module to collect 36 the configuration from the remote device. The configuration 37 is then included in return facts. By default, the configuration 38 is returned as text. The C(config_format) can be used to return 39 different Junos configuration formats. 40 required: true 41 default: false 42 config_format: 43 description: 44 - The C(config_format) argument is used to specify the desired 45 format of the configuration file. Devices support three 46 configuration file formats. By default, the configuration 47 from the device is returned as text. The other options include 48 set and xml. If the xml option is choosen, the configuration file 49 is returned as both xml and json. 
50 required: false 51 default: text 52 choices: ['xml', 'text', 'set'] 53 requirements: 54 - junos-eznc 55 notes: 56 - This module requires the netconf system service be enabled on 57 the remote device being managed 58 """ 59 60 EXAMPLES = """ 61 # the required set of connection arguments have been purposely left off 62 # the examples for brevity 63 64 - name: collect default set of facts 65 junos_facts: 66 67 - name: collect default set of facts and configuration 68 junos_facts: 69 config: yes 70 71 - name: collect default set of facts and configuration in set format 72 junos_facts: 73 config: yes 74 config_format: set 75 76 - name: collect default set of facts and configuration in XML and JSON format 77 junos_facts: 78 config: yes 79 config_format: xml 80 """ 81 82 RETURN = """ 83 ansible_facts: 84 descrption: Returns the facts collect from the device 85 returned: always 86 type: dict 87 """ 88 89 def main(): 90 """ Main entry point for AnsibleModule 91 """ 92 spec = dict( 93 config=dict(required=True, type='bool'), 94 config_format=dict(default='text', choices=['xml', 'set', 'text']), 95 transport=dict(default='netconf', choices=['netconf']) 96 ) 97 98 module = get_module(argument_spec=spec, 99 supports_check_mode=True) 100 101 result = dict(changed=False) 102 103 facts = module.get_facts() 104 105 if '2RE' in facts: 106 facts['has_2RE'] = facts['2RE'] 107 del facts['2RE'] 108 109 facts['version_info'] = dict(facts['version_info']) 110 111 if module.params['config']: 112 config_format = module.params['config_format'] 113 resp_config = module.get_config( config_format=config_format) 114 115 if config_format in ['text', 'set']: 116 facts['config'] = resp_config 117 elif config_format == "xml": 118 facts['config'] = xml_to_string(resp_config) 119 facts['config_json'] = xml_to_json(resp_config) 120 121 result['ansible_facts'] = facts 122 module.exit_json(**result) 123 124 from ansible.module_utils.basic import * 125 from ansible.module_utils.junos import * 126 127 if __name__ == '__main__': 128 main() 129 [end of network/junos/junos_facts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/network/junos/junos_facts.py b/network/junos/junos_facts.py --- a/network/junos/junos_facts.py +++ b/network/junos/junos_facts.py @@ -37,8 +37,8 @@ is then included in return facts. By default, the configuration is returned as text. The C(config_format) can be used to return different Junos configuration formats. - required: true - default: false + required: false + default: null config_format: description: - The C(config_format) argument is used to specify the desired @@ -90,7 +90,7 @@ """ Main entry point for AnsibleModule """ spec = dict( - config=dict(required=True, type='bool'), + config=dict(type='bool'), config_format=dict(default='text', choices=['xml', 'set', 'text']), transport=dict(default='netconf', choices=['netconf']) ) @@ -108,7 +108,7 @@ facts['version_info'] = dict(facts['version_info']) - if module.params['config']: + if module.params['config'] is True: config_format = module.params['config_format'] resp_config = module.get_config( config_format=config_format)
{"golden_diff": "diff --git a/network/junos/junos_facts.py b/network/junos/junos_facts.py\n--- a/network/junos/junos_facts.py\n+++ b/network/junos/junos_facts.py\n@@ -37,8 +37,8 @@\n is then included in return facts. By default, the configuration\n is returned as text. The C(config_format) can be used to return\n different Junos configuration formats.\n- required: true\n- default: false\n+ required: false\n+ default: null\n config_format:\n description:\n - The C(config_format) argument is used to specify the desired\n@@ -90,7 +90,7 @@\n \"\"\" Main entry point for AnsibleModule\n \"\"\"\n spec = dict(\n- config=dict(required=True, type='bool'),\n+ config=dict(type='bool'),\n config_format=dict(default='text', choices=['xml', 'set', 'text']),\n transport=dict(default='netconf', choices=['netconf'])\n )\n@@ -108,7 +108,7 @@\n \n facts['version_info'] = dict(facts['version_info'])\n \n- if module.params['config']:\n+ if module.params['config'] is True:\n config_format = module.params['config_format']\n resp_config = module.get_config( config_format=config_format)\n", "issue": "junos_facts - config parameter should have a default value\n##### COMPONENT NAME\n\nmodule : junos_facts\n\n```\nnetwork/junos_facts.py\n```\n##### ANSIBLE VERSION\n\n2.1.0.0\n##### OS / ENVIRONMENT\n\nMac OS 10.11.5\n##### SUMMARY\n\nCurrently the parameter `config` for junos_facts is mandatory and do not have a default value\nI think it would be better to set a default value to `false` and not make this parameter mandatory.\n\nPlaybooks will be simplified\n\nI'm happy to send a pull request for it, but I wanted to discuss this change before\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_facts\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Collect facts from remote device running Junos\ndescription:\n - Collects fact information from a remote device running the Junos\n operating system. By default, the module will collect basic fact\n information from the device to be included with the hostvars.\n Additional fact information can be collected based on the\n configured set of arguments.\nextends_documentation_fragment: junos\noptions:\n config:\n description:\n - The C(config) argument instructs the fact module to collect\n the configuration from the remote device. The configuration\n is then included in return facts. By default, the configuration\n is returned as text. The C(config_format) can be used to return\n different Junos configuration formats.\n required: true\n default: false\n config_format:\n description:\n - The C(config_format) argument is used to specify the desired\n format of the configuration file. Devices support three\n configuration file formats. By default, the configuration\n from the device is returned as text. 
The other options include\n set and xml. If the xml option is choosen, the configuration file\n is returned as both xml and json.\n required: false\n default: text\n choices: ['xml', 'text', 'set']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# the required set of connection arguments have been purposely left off\n# the examples for brevity\n\n- name: collect default set of facts\n junos_facts:\n\n- name: collect default set of facts and configuration\n junos_facts:\n config: yes\n\n- name: collect default set of facts and configuration in set format\n junos_facts:\n config: yes\n config_format: set\n\n- name: collect default set of facts and configuration in XML and JSON format\n junos_facts:\n config: yes\n config_format: xml\n\"\"\"\n\nRETURN = \"\"\"\nansible_facts:\n descrption: Returns the facts collect from the device\n returned: always\n type: dict\n\"\"\"\n\ndef main():\n \"\"\" Main entry point for AnsibleModule\n \"\"\"\n spec = dict(\n config=dict(required=True, type='bool'),\n config_format=dict(default='text', choices=['xml', 'set', 'text']),\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n module = get_module(argument_spec=spec,\n supports_check_mode=True)\n\n result = dict(changed=False)\n\n facts = module.get_facts()\n\n if '2RE' in facts:\n facts['has_2RE'] = facts['2RE']\n del facts['2RE']\n\n facts['version_info'] = dict(facts['version_info'])\n\n if module.params['config']:\n config_format = module.params['config_format']\n resp_config = module.get_config( config_format=config_format)\n\n if config_format in ['text', 'set']:\n facts['config'] = resp_config\n elif config_format == \"xml\":\n facts['config'] = xml_to_string(resp_config)\n facts['config_json'] = xml_to_json(resp_config)\n\n result['ansible_facts'] = facts\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.junos import *\n\nif __name__ == '__main__':\n main()\n", "path": "network/junos/junos_facts.py"}]}
1899
296
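In the Ansible record above, dropping `required=True` means an unset `config` arrives as None, so the guard must compare against True explicitly instead of relying on truthiness. A tiny self-contained illustration of that gate (helper name is illustrative):

```
def wants_config(params):
    # No default for 'config': absent means None, and only an explicit
    # True should trigger the extra configuration fetch.
    return params.get('config') is True

print(wants_config({}))                  # False - collect facts only
print(wants_config({'config': True}))    # True  - also fetch the config
print(wants_config({'config': False}))   # False - explicitly declined
```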
gh_patches_debug_5836
rasdani/github-patches
git_diff
sanic-org__sanic-961
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cookie secure option not encoded properly When `Cookies.encode` encounters `response.cookies["<cookie>"]["secure"] = False` then it outputs: `b'Domain=xad.com; Path=/; Secure=False'` where it should output: `b'Domain=xad.com; Path=/;'` when `response.cookies["<cookie>"]["secure"] = False` and `b'Domain=xad.com; Path=/; Secure;'` when `response.cookies["<cookie>"]["secure"] = True` </issue> <code> [start of sanic/cookies.py] 1 import re 2 import string 3 4 # ------------------------------------------------------------ # 5 # SimpleCookie 6 # ------------------------------------------------------------ # 7 8 # Straight up copied this section of dark magic from SimpleCookie 9 10 _LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:" 11 _UnescapedChars = _LegalChars + ' ()/<=>?@[]{}' 12 13 _Translator = {n: '\\%03o' % n 14 for n in set(range(256)) - set(map(ord, _UnescapedChars))} 15 _Translator.update({ 16 ord('"'): '\\"', 17 ord('\\'): '\\\\', 18 }) 19 20 21 def _quote(str): 22 """Quote a string for use in a cookie header. 23 If the string does not need to be double-quoted, then just return the 24 string. Otherwise, surround the string in doublequotes and quote 25 (with a \) special characters. 26 """ 27 if str is None or _is_legal_key(str): 28 return str 29 else: 30 return '"' + str.translate(_Translator) + '"' 31 32 33 _is_legal_key = re.compile('[%s]+' % re.escape(_LegalChars)).fullmatch 34 35 # ------------------------------------------------------------ # 36 # Custom SimpleCookie 37 # ------------------------------------------------------------ # 38 39 40 class CookieJar(dict): 41 """CookieJar dynamically writes headers as cookies are added and removed 42 It gets around the limitation of one header per name by using the 43 MultiHeader class to provide a unique key that encodes to Set-Cookie. 
44 """ 45 46 def __init__(self, headers): 47 super().__init__() 48 self.headers = headers 49 self.cookie_headers = {} 50 51 def __setitem__(self, key, value): 52 # If this cookie doesn't exist, add it to the header keys 53 cookie_header = self.cookie_headers.get(key) 54 if not cookie_header: 55 cookie = Cookie(key, value) 56 cookie['path'] = '/' 57 cookie_header = MultiHeader("Set-Cookie") 58 self.cookie_headers[key] = cookie_header 59 self.headers[cookie_header] = cookie 60 return super().__setitem__(key, cookie) 61 else: 62 self[key].value = value 63 64 def __delitem__(self, key): 65 if key not in self.cookie_headers: 66 self[key] = '' 67 self[key]['max-age'] = 0 68 else: 69 cookie_header = self.cookie_headers[key] 70 del self.headers[cookie_header] 71 del self.cookie_headers[key] 72 return super().__delitem__(key) 73 74 75 class Cookie(dict): 76 """A stripped down version of Morsel from SimpleCookie #gottagofast""" 77 _keys = { 78 "expires": "expires", 79 "path": "Path", 80 "comment": "Comment", 81 "domain": "Domain", 82 "max-age": "Max-Age", 83 "secure": "Secure", 84 "httponly": "HttpOnly", 85 "version": "Version", 86 } 87 _flags = {'secure', 'httponly'} 88 89 def __init__(self, key, value): 90 if key in self._keys: 91 raise KeyError("Cookie name is a reserved word") 92 if not _is_legal_key(key): 93 raise KeyError("Cookie key contains illegal characters") 94 self.key = key 95 self.value = value 96 super().__init__() 97 98 def __setitem__(self, key, value): 99 if key not in self._keys: 100 raise KeyError("Unknown cookie property") 101 return super().__setitem__(key, value) 102 103 def encode(self, encoding): 104 output = ['%s=%s' % (self.key, _quote(self.value))] 105 for key, value in self.items(): 106 if key == 'max-age': 107 try: 108 output.append('%s=%d' % (self._keys[key], value)) 109 except TypeError: 110 output.append('%s=%s' % (self._keys[key], value)) 111 elif key == 'expires': 112 try: 113 output.append('%s=%s' % ( 114 self._keys[key], 115 value.strftime("%a, %d-%b-%Y %T GMT") 116 )) 117 except AttributeError: 118 output.append('%s=%s' % (self._keys[key], value)) 119 elif key in self._flags and self[key]: 120 output.append(self._keys[key]) 121 else: 122 output.append('%s=%s' % (self._keys[key], value)) 123 124 return "; ".join(output).encode(encoding) 125 126 # ------------------------------------------------------------ # 127 # Header Trickery 128 # ------------------------------------------------------------ # 129 130 131 class MultiHeader: 132 """String-holding object which allow us to set a header within response 133 that has a unique key, but may contain duplicate header names 134 """ 135 def __init__(self, name): 136 self.name = name 137 138 def encode(self): 139 return self.name.encode() 140 [end of sanic/cookies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/cookies.py b/sanic/cookies.py --- a/sanic/cookies.py +++ b/sanic/cookies.py @@ -98,7 +98,8 @@ def __setitem__(self, key, value): if key not in self._keys: raise KeyError("Unknown cookie property") - return super().__setitem__(key, value) + if value is not False: + return super().__setitem__(key, value) def encode(self, encoding): output = ['%s=%s' % (self.key, _quote(self.value))]
{"golden_diff": "diff --git a/sanic/cookies.py b/sanic/cookies.py\n--- a/sanic/cookies.py\n+++ b/sanic/cookies.py\n@@ -98,7 +98,8 @@\n def __setitem__(self, key, value):\n if key not in self._keys:\n raise KeyError(\"Unknown cookie property\")\n- return super().__setitem__(key, value)\n+ if value is not False:\n+ return super().__setitem__(key, value)\n \n def encode(self, encoding):\n output = ['%s=%s' % (self.key, _quote(self.value))]\n", "issue": "Cookie secure option not encoded properly\nWhen `Cookies.encode` encounters `response.cookies[\"<cookie>\"][\"secure\"] = False` then it outputs:\r\n\r\n`b'Domain=xad.com; Path=/; Secure=False'`\r\n\r\nwhere it should output:\r\n\r\n`b'Domain=xad.com; Path=/;'` when `response.cookies[\"<cookie>\"][\"secure\"] = False`\r\n\r\nand\r\n\r\n`b'Domain=xad.com; Path=/; Secure;'` when `response.cookies[\"<cookie>\"][\"secure\"] = True`\n", "before_files": [{"content": "import re\nimport string\n\n# ------------------------------------------------------------ #\n# SimpleCookie\n# ------------------------------------------------------------ #\n\n# Straight up copied this section of dark magic from SimpleCookie\n\n_LegalChars = string.ascii_letters + string.digits + \"!#$%&'*+-.^_`|~:\"\n_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'\n\n_Translator = {n: '\\\\%03o' % n\n for n in set(range(256)) - set(map(ord, _UnescapedChars))}\n_Translator.update({\n ord('\"'): '\\\\\"',\n ord('\\\\'): '\\\\\\\\',\n})\n\n\ndef _quote(str):\n \"\"\"Quote a string for use in a cookie header.\n If the string does not need to be double-quoted, then just return the\n string. Otherwise, surround the string in doublequotes and quote\n (with a \\) special characters.\n \"\"\"\n if str is None or _is_legal_key(str):\n return str\n else:\n return '\"' + str.translate(_Translator) + '\"'\n\n\n_is_legal_key = re.compile('[%s]+' % re.escape(_LegalChars)).fullmatch\n\n# ------------------------------------------------------------ #\n# Custom SimpleCookie\n# ------------------------------------------------------------ #\n\n\nclass CookieJar(dict):\n \"\"\"CookieJar dynamically writes headers as cookies are added and removed\n It gets around the limitation of one header per name by using the\n MultiHeader class to provide a unique key that encodes to Set-Cookie.\n \"\"\"\n\n def __init__(self, headers):\n super().__init__()\n self.headers = headers\n self.cookie_headers = {}\n\n def __setitem__(self, key, value):\n # If this cookie doesn't exist, add it to the header keys\n cookie_header = self.cookie_headers.get(key)\n if not cookie_header:\n cookie = Cookie(key, value)\n cookie['path'] = '/'\n cookie_header = MultiHeader(\"Set-Cookie\")\n self.cookie_headers[key] = cookie_header\n self.headers[cookie_header] = cookie\n return super().__setitem__(key, cookie)\n else:\n self[key].value = value\n\n def __delitem__(self, key):\n if key not in self.cookie_headers:\n self[key] = ''\n self[key]['max-age'] = 0\n else:\n cookie_header = self.cookie_headers[key]\n del self.headers[cookie_header]\n del self.cookie_headers[key]\n return super().__delitem__(key)\n\n\nclass Cookie(dict):\n \"\"\"A stripped down version of Morsel from SimpleCookie #gottagofast\"\"\"\n _keys = {\n \"expires\": \"expires\",\n \"path\": \"Path\",\n \"comment\": \"Comment\",\n \"domain\": \"Domain\",\n \"max-age\": \"Max-Age\",\n \"secure\": \"Secure\",\n \"httponly\": \"HttpOnly\",\n \"version\": \"Version\",\n }\n _flags = {'secure', 'httponly'}\n\n def __init__(self, key, value):\n if key in self._keys:\n raise 
KeyError(\"Cookie name is a reserved word\")\n if not _is_legal_key(key):\n raise KeyError(\"Cookie key contains illegal characters\")\n self.key = key\n self.value = value\n super().__init__()\n\n def __setitem__(self, key, value):\n if key not in self._keys:\n raise KeyError(\"Unknown cookie property\")\n return super().__setitem__(key, value)\n\n def encode(self, encoding):\n output = ['%s=%s' % (self.key, _quote(self.value))]\n for key, value in self.items():\n if key == 'max-age':\n try:\n output.append('%s=%d' % (self._keys[key], value))\n except TypeError:\n output.append('%s=%s' % (self._keys[key], value))\n elif key == 'expires':\n try:\n output.append('%s=%s' % (\n self._keys[key],\n value.strftime(\"%a, %d-%b-%Y %T GMT\")\n ))\n except AttributeError:\n output.append('%s=%s' % (self._keys[key], value))\n elif key in self._flags and self[key]:\n output.append(self._keys[key])\n else:\n output.append('%s=%s' % (self._keys[key], value))\n\n return \"; \".join(output).encode(encoding)\n\n# ------------------------------------------------------------ #\n# Header Trickery\n# ------------------------------------------------------------ #\n\n\nclass MultiHeader:\n \"\"\"String-holding object which allow us to set a header within response\n that has a unique key, but may contain duplicate header names\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n def encode(self):\n return self.name.encode()\n", "path": "sanic/cookies.py"}]}
1,979
133
gh_patches_debug_39301
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-114
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handle routing takeover at client Currently, routing is handled by Django. When the user visits the application initially, the routing should be handled by Django, and after the application loads, the routing should be taken over by client. </issue> <code> [start of mathesar/urls.py] 1 from django.urls import include, path 2 from rest_framework_nested import routers 3 4 from mathesar.views import api, frontend 5 6 7 router = routers.DefaultRouter() 8 router.register(r'tables', api.TableViewSet) 9 router.register(r'schemas', api.SchemaViewSet) 10 router.register(r'database_keys', api.DatabaseKeyViewSet, basename='database_keys') 11 12 records_router = routers.NestedSimpleRouter(router, r'tables', lookup='table') 13 records_router.register(r'records', api.RecordViewSet, basename='table-records') 14 15 urlpatterns = [ 16 path('', frontend.index, name="index"), 17 path( 18 'tables/<int:pk>/', 19 frontend.TableDetail.as_view(), 20 name='frontend-table-detail', 21 ), 22 path('api/v0/', include(router.urls)), 23 path('api/v0/', include(records_router.urls)), 24 ] 25 [end of mathesar/urls.py] [start of mathesar/views/frontend.py] 1 from django.http import HttpResponseRedirect 2 from django.shortcuts import render 3 from django.urls import reverse 4 from django.views.generic import DetailView 5 6 from mathesar.forms.forms import UploadFileForm 7 from mathesar.imports.csv import create_table_from_csv 8 from mathesar.models import Table, Schema 9 from mathesar.serializers import SchemaSerializer 10 11 12 def index(request): 13 tables = Table.objects.all() 14 if request.method == "POST": 15 form = UploadFileForm(request.POST, request.FILES) 16 if form.is_valid(): 17 table = create_table_from_csv( 18 name=form.cleaned_data["table_name"], 19 schema=form.cleaned_data["schema_name"], 20 database_key=form.cleaned_data["database_key"], 21 csv_file=request.FILES["file"] 22 ) 23 return HttpResponseRedirect( 24 reverse("frontend-table-detail", kwargs={"pk": table.id}) 25 ) 26 else: 27 form = UploadFileForm() 28 schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request}) 29 return render( 30 request, 31 "mathesar/index.html", 32 { 33 "form": form, 34 "tables": sorted(tables, key=lambda x: x.schema.name), 35 "schema_data": schema_serializer.data 36 }, 37 ) 38 39 40 class TableDetail(DetailView): 41 context_object_name = "table" 42 queryset = Table.objects.all() 43 [end of mathesar/views/frontend.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/urls.py b/mathesar/urls.py --- a/mathesar/urls.py +++ b/mathesar/urls.py @@ -14,11 +14,7 @@ urlpatterns = [ path('', frontend.index, name="index"), - path( - 'tables/<int:pk>/', - frontend.TableDetail.as_view(), - name='frontend-table-detail', - ), + path('tables/<int:pk>', frontend.table, name="table"), path('api/v0/', include(router.urls)), path('api/v0/', include(records_router.urls)), ] diff --git a/mathesar/views/frontend.py b/mathesar/views/frontend.py --- a/mathesar/views/frontend.py +++ b/mathesar/views/frontend.py @@ -1,16 +1,22 @@ -from django.http import HttpResponseRedirect +from django.http import JsonResponse from django.shortcuts import render -from django.urls import reverse -from django.views.generic import DetailView from mathesar.forms.forms import UploadFileForm from mathesar.imports.csv import create_table_from_csv from mathesar.models import Table, Schema -from mathesar.serializers import SchemaSerializer +from mathesar.serializers import SchemaSerializer, TableSerializer, RecordSerializer +from mathesar.database.utils import get_non_default_database_keys + + +def get_common_data(request): + schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request}) + return { + "schemas": schema_serializer.data, + "databases": get_non_default_database_keys(), + } def index(request): - tables = Table.objects.all() if request.method == "POST": form = UploadFileForm(request.POST, request.FILES) if form.is_valid(): @@ -20,23 +26,32 @@ database_key=form.cleaned_data["database_key"], csv_file=request.FILES["file"] ) - return HttpResponseRedirect( - reverse("frontend-table-detail", kwargs={"pk": table.id}) - ) - else: - form = UploadFileForm() - schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request}) + return JsonResponse({"pk": table.id}, status=200) return render( request, "mathesar/index.html", { - "form": form, - "tables": sorted(tables, key=lambda x: x.schema.name), - "schema_data": schema_serializer.data - }, + "common_data": get_common_data(request), + } ) -class TableDetail(DetailView): - context_object_name = "table" - queryset = Table.objects.all() +def table(request, pk): + try: + table_data = Table.objects.get(pk=pk) + table_serialized = TableSerializer(table_data, context={'request': request}).data + records_serialized = RecordSerializer(table_data.get_records(limit=50, offset=0), many=True, context={'request': request}).data + except Table.DoesNotExist: + table_serialized = {} + records_serialized = [] + return render( + request, + "mathesar/index.html", + { + "common_data": get_common_data(request), + "route_specific_data": { + "table-detail": table_serialized, + "table-records": records_serialized + } + } + )
{"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -14,11 +14,7 @@\n \n urlpatterns = [\n path('', frontend.index, name=\"index\"),\n- path(\n- 'tables/<int:pk>/',\n- frontend.TableDetail.as_view(),\n- name='frontend-table-detail',\n- ),\n+ path('tables/<int:pk>', frontend.table, name=\"table\"),\n path('api/v0/', include(router.urls)),\n path('api/v0/', include(records_router.urls)),\n ]\ndiff --git a/mathesar/views/frontend.py b/mathesar/views/frontend.py\n--- a/mathesar/views/frontend.py\n+++ b/mathesar/views/frontend.py\n@@ -1,16 +1,22 @@\n-from django.http import HttpResponseRedirect\n+from django.http import JsonResponse\n from django.shortcuts import render\n-from django.urls import reverse\n-from django.views.generic import DetailView\n \n from mathesar.forms.forms import UploadFileForm\n from mathesar.imports.csv import create_table_from_csv\n from mathesar.models import Table, Schema\n-from mathesar.serializers import SchemaSerializer\n+from mathesar.serializers import SchemaSerializer, TableSerializer, RecordSerializer\n+from mathesar.database.utils import get_non_default_database_keys\n+\n+\n+def get_common_data(request):\n+ schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request})\n+ return {\n+ \"schemas\": schema_serializer.data,\n+ \"databases\": get_non_default_database_keys(),\n+ }\n \n \n def index(request):\n- tables = Table.objects.all()\n if request.method == \"POST\":\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n@@ -20,23 +26,32 @@\n database_key=form.cleaned_data[\"database_key\"],\n csv_file=request.FILES[\"file\"]\n )\n- return HttpResponseRedirect(\n- reverse(\"frontend-table-detail\", kwargs={\"pk\": table.id})\n- )\n- else:\n- form = UploadFileForm()\n- schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request})\n+ return JsonResponse({\"pk\": table.id}, status=200)\n return render(\n request,\n \"mathesar/index.html\",\n {\n- \"form\": form,\n- \"tables\": sorted(tables, key=lambda x: x.schema.name),\n- \"schema_data\": schema_serializer.data\n- },\n+ \"common_data\": get_common_data(request),\n+ }\n )\n \n \n-class TableDetail(DetailView):\n- context_object_name = \"table\"\n- queryset = Table.objects.all()\n+def table(request, pk):\n+ try:\n+ table_data = Table.objects.get(pk=pk)\n+ table_serialized = TableSerializer(table_data, context={'request': request}).data\n+ records_serialized = RecordSerializer(table_data.get_records(limit=50, offset=0), many=True, context={'request': request}).data\n+ except Table.DoesNotExist:\n+ table_serialized = {}\n+ records_serialized = []\n+ return render(\n+ request,\n+ \"mathesar/index.html\",\n+ {\n+ \"common_data\": get_common_data(request),\n+ \"route_specific_data\": {\n+ \"table-detail\": table_serialized,\n+ \"table-records\": records_serialized\n+ }\n+ }\n+ )\n", "issue": "Handle routing takeover at client\nCurrently, routing is handled by Django. 
When the user visits the application initially, the routing should be handled by Django, and after the application loads, the routing should be taken over by client.\n", "before_files": [{"content": "from django.urls import include, path\nfrom rest_framework_nested import routers\n\nfrom mathesar.views import api, frontend\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'tables', api.TableViewSet)\nrouter.register(r'schemas', api.SchemaViewSet)\nrouter.register(r'database_keys', api.DatabaseKeyViewSet, basename='database_keys')\n\nrecords_router = routers.NestedSimpleRouter(router, r'tables', lookup='table')\nrecords_router.register(r'records', api.RecordViewSet, basename='table-records')\n\nurlpatterns = [\n path('', frontend.index, name=\"index\"),\n path(\n 'tables/<int:pk>/',\n frontend.TableDetail.as_view(),\n name='frontend-table-detail',\n ),\n path('api/v0/', include(router.urls)),\n path('api/v0/', include(records_router.urls)),\n]\n", "path": "mathesar/urls.py"}, {"content": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.generic import DetailView\n\nfrom mathesar.forms.forms import UploadFileForm\nfrom mathesar.imports.csv import create_table_from_csv\nfrom mathesar.models import Table, Schema\nfrom mathesar.serializers import SchemaSerializer\n\n\ndef index(request):\n tables = Table.objects.all()\n if request.method == \"POST\":\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n table = create_table_from_csv(\n name=form.cleaned_data[\"table_name\"],\n schema=form.cleaned_data[\"schema_name\"],\n database_key=form.cleaned_data[\"database_key\"],\n csv_file=request.FILES[\"file\"]\n )\n return HttpResponseRedirect(\n reverse(\"frontend-table-detail\", kwargs={\"pk\": table.id})\n )\n else:\n form = UploadFileForm()\n schema_serializer = SchemaSerializer(Schema.objects.all(), many=True, context={'request': request})\n return render(\n request,\n \"mathesar/index.html\",\n {\n \"form\": form,\n \"tables\": sorted(tables, key=lambda x: x.schema.name),\n \"schema_data\": schema_serializer.data\n },\n )\n\n\nclass TableDetail(DetailView):\n context_object_name = \"table\"\n queryset = Table.objects.all()\n", "path": "mathesar/views/frontend.py"}]}
1,166
737
gh_patches_debug_19303
rasdani/github-patches
git_diff
netbox-community__netbox-5286
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plugins must define app_name in api/urls.py to be included in API root view ### Environment * Python version: 3.6.9 * NetBox version: 2.9.7 ### Steps to Reproduce I'm actually not certain whether this is better classified as a bug or a lapse in the documentation, but I'm opting to propose it as a bug because I think it can be addressed by a code change. 1. Create/install a plugin which provides at least one REST API endpoint. (Assume the plugin's name is `myplugin`.) 2. Within `api/urls.py`, set `app_name = myplugin-api` 3. Visit `/api/plugins/` in the browser. The plugin should be included. 4. Remove the `app_name` definition from `api/urls.py`. 5. Visit `/api/plugins/` in the browser again. The plugin no longer appears. ### Expected Behavior Plugin API endpoints should be detected automatically. AFAICT there's no reason a plugin should need to declare `app_name`. (The core apps do this, but only because they're being included dynamically.) ### Observed Behavior Plugin API endpoints disappear from the list when `app_name` is not declared. It seems like the need for the `app_name` variable could be obviated within `PluginsAPIRootView._get_plugin_entry()` by changing ``` api_app_name = import_object(f"{plugin}.api.urls.app_name") ``` to ``` api_app_name = f'{app_config.name}-api' ``` This actually raises another point: the full URL name for a plugin API endpoint is currently in the format `plugins-api:myplugin-api:url-name`, which seems a bit unwieldy: `plugins-api:myplugin:url-name` should suffice. However, this would be a breaking change for any plugins which rely on reverse URL resolution to their REST API endpoints. </issue> <code> [start of netbox/extras/plugins/views.py] 1 from collections import OrderedDict 2 3 from django.apps import apps 4 from django.conf import settings 5 from django.shortcuts import render 6 from django.urls.exceptions import NoReverseMatch 7 from django.views.generic import View 8 from rest_framework import permissions 9 from rest_framework.response import Response 10 from rest_framework.reverse import reverse 11 from rest_framework.views import APIView 12 13 from extras.plugins.utils import import_object 14 15 16 class InstalledPluginsAdminView(View): 17 """ 18 Admin view for listing all installed plugins 19 """ 20 def get(self, request): 21 plugins = [apps.get_app_config(plugin) for plugin in settings.PLUGINS] 22 return render(request, 'extras/admin/plugins_list.html', { 23 'plugins': plugins, 24 }) 25 26 27 class InstalledPluginsAPIView(APIView): 28 """ 29 API view for listing all installed plugins 30 """ 31 permission_classes = [permissions.IsAdminUser] 32 _ignore_model_permissions = True 33 exclude_from_schema = True 34 swagger_schema = None 35 36 def get_view_name(self): 37 return "Installed Plugins" 38 39 @staticmethod 40 def _get_plugin_data(plugin_app_config): 41 return { 42 'name': plugin_app_config.verbose_name, 43 'package': plugin_app_config.name, 44 'author': plugin_app_config.author, 45 'author_email': plugin_app_config.author_email, 46 'description': plugin_app_config.description, 47 'verison': plugin_app_config.version 48 } 49 50 def get(self, request, format=None): 51 return Response([self._get_plugin_data(apps.get_app_config(plugin)) for plugin in settings.PLUGINS]) 52 53 54 class PluginsAPIRootView(APIView): 55 _ignore_model_permissions = True 56 exclude_from_schema = True 57 swagger_schema = None 58 59 def get_view_name(self): 60 
return "Plugins" 61 62 @staticmethod 63 def _get_plugin_entry(plugin, app_config, request, format): 64 # Check if the plugin specifies any API URLs 65 api_app_name = import_object(f"{plugin}.api.urls.app_name") 66 if api_app_name is None: 67 # Plugin does not expose an API 68 return None 69 70 try: 71 entry = (getattr(app_config, 'base_url', app_config.label), reverse( 72 f"plugins-api:{api_app_name}:api-root", 73 request=request, 74 format=format 75 )) 76 except NoReverseMatch: 77 # The plugin does not include an api-root url 78 entry = None 79 80 return entry 81 82 def get(self, request, format=None): 83 84 entries = [] 85 for plugin in settings.PLUGINS: 86 app_config = apps.get_app_config(plugin) 87 entry = self._get_plugin_entry(plugin, app_config, request, format) 88 if entry is not None: 89 entries.append(entry) 90 91 return Response(OrderedDict(( 92 ('installed-plugins', reverse('plugins-api:plugins-list', request=request, format=format)), 93 *entries 94 ))) 95 [end of netbox/extras/plugins/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/extras/plugins/views.py b/netbox/extras/plugins/views.py --- a/netbox/extras/plugins/views.py +++ b/netbox/extras/plugins/views.py @@ -10,8 +10,6 @@ from rest_framework.reverse import reverse from rest_framework.views import APIView -from extras.plugins.utils import import_object - class InstalledPluginsAdminView(View): """ @@ -62,11 +60,7 @@ @staticmethod def _get_plugin_entry(plugin, app_config, request, format): # Check if the plugin specifies any API URLs - api_app_name = import_object(f"{plugin}.api.urls.app_name") - if api_app_name is None: - # Plugin does not expose an API - return None - + api_app_name = f'{app_config.name}-api' try: entry = (getattr(app_config, 'base_url', app_config.label), reverse( f"plugins-api:{api_app_name}:api-root",
{"golden_diff": "diff --git a/netbox/extras/plugins/views.py b/netbox/extras/plugins/views.py\n--- a/netbox/extras/plugins/views.py\n+++ b/netbox/extras/plugins/views.py\n@@ -10,8 +10,6 @@\n from rest_framework.reverse import reverse\n from rest_framework.views import APIView\n \n-from extras.plugins.utils import import_object\n-\n \n class InstalledPluginsAdminView(View):\n \"\"\"\n@@ -62,11 +60,7 @@\n @staticmethod\n def _get_plugin_entry(plugin, app_config, request, format):\n # Check if the plugin specifies any API URLs\n- api_app_name = import_object(f\"{plugin}.api.urls.app_name\")\n- if api_app_name is None:\n- # Plugin does not expose an API\n- return None\n-\n+ api_app_name = f'{app_config.name}-api'\n try:\n entry = (getattr(app_config, 'base_url', app_config.label), reverse(\n f\"plugins-api:{api_app_name}:api-root\",\n", "issue": "Plugins must define app_name in api/urls.py to be included in API root view\n### Environment\r\n* Python version: 3.6.9\r\n* NetBox version: 2.9.7\r\n\r\n### Steps to Reproduce\r\nI'm actually not certain whether this is better classified as a bug or a lapse in the documentation, but I'm opting to propose it as a bug because I think it can be addressed by a code change.\r\n\r\n1. Create/install a plugin which provides at least one REST API endpoint. (Assume the plugin's name is `myplugin`.)\r\n2. Within `api/urls.py`, set `app_name = myplugin-api`\r\n3. Visit `/api/plugins/` in the browser. The plugin should be included.\r\n4. Remove the `app_name` definition from `api/urls.py`.\r\n5. Visit `/api/plugins/` in the browser again. The plugin no longer appears.\r\n\r\n### Expected Behavior\r\nPlugin API endpoints should be detected automatically. AFAICT there's no reason a plugin should need to declare `app_name`. (The core apps do this, but only because they're being included dynamically.)\r\n\r\n### Observed Behavior\r\nPlugin API endpoints disappear from the list when `app_name` is not declared.\r\n\r\nIt seems like the need for the `app_name` variable could be obviated within `PluginsAPIRootView._get_plugin_entry()` by changing\r\n\r\n```\r\napi_app_name = import_object(f\"{plugin}.api.urls.app_name\")\r\n```\r\n\r\nto\r\n\r\n```\r\napi_app_name = f'{app_config.name}-api'\r\n```\r\n\r\nThis actually raises another point: the full URL name for a plugin API endpoint is currently in the format `plugins-api:myplugin-api:url-name`, which seems a bit unwieldy: `plugins-api:myplugin:url-name` should suffice. 
However, this would be a breaking change for any plugins which rely on reverse URL resolution to their REST API endpoints.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls.exceptions import NoReverseMatch\nfrom django.views.generic import View\nfrom rest_framework import permissions\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom extras.plugins.utils import import_object\n\n\nclass InstalledPluginsAdminView(View):\n \"\"\"\n Admin view for listing all installed plugins\n \"\"\"\n def get(self, request):\n plugins = [apps.get_app_config(plugin) for plugin in settings.PLUGINS]\n return render(request, 'extras/admin/plugins_list.html', {\n 'plugins': plugins,\n })\n\n\nclass InstalledPluginsAPIView(APIView):\n \"\"\"\n API view for listing all installed plugins\n \"\"\"\n permission_classes = [permissions.IsAdminUser]\n _ignore_model_permissions = True\n exclude_from_schema = True\n swagger_schema = None\n\n def get_view_name(self):\n return \"Installed Plugins\"\n\n @staticmethod\n def _get_plugin_data(plugin_app_config):\n return {\n 'name': plugin_app_config.verbose_name,\n 'package': plugin_app_config.name,\n 'author': plugin_app_config.author,\n 'author_email': plugin_app_config.author_email,\n 'description': plugin_app_config.description,\n 'verison': plugin_app_config.version\n }\n\n def get(self, request, format=None):\n return Response([self._get_plugin_data(apps.get_app_config(plugin)) for plugin in settings.PLUGINS])\n\n\nclass PluginsAPIRootView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n swagger_schema = None\n\n def get_view_name(self):\n return \"Plugins\"\n\n @staticmethod\n def _get_plugin_entry(plugin, app_config, request, format):\n # Check if the plugin specifies any API URLs\n api_app_name = import_object(f\"{plugin}.api.urls.app_name\")\n if api_app_name is None:\n # Plugin does not expose an API\n return None\n\n try:\n entry = (getattr(app_config, 'base_url', app_config.label), reverse(\n f\"plugins-api:{api_app_name}:api-root\",\n request=request,\n format=format\n ))\n except NoReverseMatch:\n # The plugin does not include an api-root url\n entry = None\n\n return entry\n\n def get(self, request, format=None):\n\n entries = []\n for plugin in settings.PLUGINS:\n app_config = apps.get_app_config(plugin)\n entry = self._get_plugin_entry(plugin, app_config, request, format)\n if entry is not None:\n entries.append(entry)\n\n return Response(OrderedDict((\n ('installed-plugins', reverse('plugins-api:plugins-list', request=request, format=format)),\n *entries\n )))\n", "path": "netbox/extras/plugins/views.py"}]}
1,726
220
gh_patches_debug_8842
rasdani/github-patches
git_diff
deepset-ai__haystack-1620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Connection Error. Is Haystack running? JSON Issue with Docker Compose On Fresh Repo and Image Pulls ### Discussed in https://github.com/deepset-ai/haystack/discussions/1617 <div type='discussions-op-text'> <sup>Originally posted by **sieu-tran** October 19, 2021</sup> Between October 18, 2021 and October 19, 2021, something has changed and the docker returns the error: "Connection Error. Is Haystack running?" when we start running it.</div> </issue> <code> [start of ui/utils.py] 1 import os 2 3 import logging 4 import requests 5 import streamlit as st 6 7 API_ENDPOINT = os.getenv("API_ENDPOINT", "http://localhost:8000") 8 STATUS = "initialized" 9 DOC_REQUEST = "query" 10 DOC_FEEDBACK = "feedback" 11 DOC_UPLOAD = "file-upload" 12 13 14 def haystack_is_ready(): 15 url = f"{API_ENDPOINT}/{STATUS}" 16 try: 17 if requests.get(url).json(): 18 return True 19 except Exception as e: 20 logging.exception(e) 21 return False 22 23 24 @st.cache(show_spinner=False) 25 def retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5): 26 # Query Haystack API 27 url = f"{API_ENDPOINT}/{DOC_REQUEST}" 28 params = {"filters": filters, "ESRetriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} 29 req = {"query": query, "params": params} 30 response_raw = requests.post(url, json=req).json() 31 32 # Format response 33 result = [] 34 answers = response_raw["answers"] 35 for i in range(len(answers)): 36 answer = answers[i] 37 answer_text = answer["answer"] 38 if answer_text: 39 result.append( 40 { 41 "context": "..." + answer["context"] + "...", 42 "answer": answer_text, 43 "source": answer["meta"]["name"], 44 "relevance": round(answer["score"] * 100, 2), 45 "document_id": answer["document_id"], 46 "offset_start_in_doc": answer["offsets_in_document"][0]["start"], 47 } 48 ) 49 return result, response_raw 50 51 52 def feedback_doc(question, is_correct_answer, document_id, model_id, is_correct_document, answer, offset_start_in_doc): 53 # Feedback Haystack API 54 url = f"{API_ENDPOINT}/{DOC_FEEDBACK}" 55 #TODO adjust after Label refactoring 56 req = { 57 "question": question, 58 "is_correct_answer": is_correct_answer, 59 "document_id": document_id, 60 "model_id": model_id, 61 "is_correct_document": is_correct_document, 62 "answer": answer, 63 "offset_start_in_doc": offset_start_in_doc, 64 } 65 response_raw = requests.post(url, json=req).json() 66 return response_raw 67 68 69 def upload_doc(file): 70 url = f"{API_ENDPOINT}/{DOC_UPLOAD}" 71 files = [("files", file)] 72 response_raw = requests.post(url, files=files).json() 73 return response_raw 74 [end of ui/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ui/utils.py b/ui/utils.py --- a/ui/utils.py +++ b/ui/utils.py @@ -25,7 +25,7 @@ def retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5): # Query Haystack API url = f"{API_ENDPOINT}/{DOC_REQUEST}" - params = {"filters": filters, "ESRetriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} + params = {"filters": filters, "Retriever": {"top_k": top_k_retriever}, "Reader": {"top_k": top_k_reader}} req = {"query": query, "params": params} response_raw = requests.post(url, json=req).json()
{"golden_diff": "diff --git a/ui/utils.py b/ui/utils.py\n--- a/ui/utils.py\n+++ b/ui/utils.py\n@@ -25,7 +25,7 @@\n def retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5):\n # Query Haystack API\n url = f\"{API_ENDPOINT}/{DOC_REQUEST}\"\n- params = {\"filters\": filters, \"ESRetriever\": {\"top_k\": top_k_retriever}, \"Reader\": {\"top_k\": top_k_reader}}\n+ params = {\"filters\": filters, \"Retriever\": {\"top_k\": top_k_retriever}, \"Reader\": {\"top_k\": top_k_reader}}\n req = {\"query\": query, \"params\": params}\n response_raw = requests.post(url, json=req).json()\n", "issue": "Connection Error. Is Haystack running? JSON Issue with Docker Compose On Fresh Repo and Image Pulls\n### Discussed in https://github.com/deepset-ai/haystack/discussions/1617\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **sieu-tran** October 19, 2021</sup>\r\nBetween October 18, 2021 and October 19, 2021, something has changed and the docker returns the error: \"Connection Error. Is Haystack running?\" when we start running it.</div>\n", "before_files": [{"content": "import os\n\nimport logging\nimport requests\nimport streamlit as st\n\nAPI_ENDPOINT = os.getenv(\"API_ENDPOINT\", \"http://localhost:8000\")\nSTATUS = \"initialized\"\nDOC_REQUEST = \"query\"\nDOC_FEEDBACK = \"feedback\"\nDOC_UPLOAD = \"file-upload\"\n\n\ndef haystack_is_ready():\n url = f\"{API_ENDPOINT}/{STATUS}\"\n try:\n if requests.get(url).json():\n return True\n except Exception as e:\n logging.exception(e)\n return False\n\n\[email protected](show_spinner=False)\ndef retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5):\n # Query Haystack API\n url = f\"{API_ENDPOINT}/{DOC_REQUEST}\"\n params = {\"filters\": filters, \"ESRetriever\": {\"top_k\": top_k_retriever}, \"Reader\": {\"top_k\": top_k_reader}}\n req = {\"query\": query, \"params\": params}\n response_raw = requests.post(url, json=req).json()\n\n # Format response\n result = []\n answers = response_raw[\"answers\"]\n for i in range(len(answers)):\n answer = answers[i]\n answer_text = answer[\"answer\"]\n if answer_text:\n result.append(\n {\n \"context\": \"...\" + answer[\"context\"] + \"...\",\n \"answer\": answer_text,\n \"source\": answer[\"meta\"][\"name\"],\n \"relevance\": round(answer[\"score\"] * 100, 2),\n \"document_id\": answer[\"document_id\"],\n \"offset_start_in_doc\": answer[\"offsets_in_document\"][0][\"start\"],\n }\n )\n return result, response_raw\n\n\ndef feedback_doc(question, is_correct_answer, document_id, model_id, is_correct_document, answer, offset_start_in_doc):\n # Feedback Haystack API\n url = f\"{API_ENDPOINT}/{DOC_FEEDBACK}\"\n #TODO adjust after Label refactoring\n req = {\n \"question\": question,\n \"is_correct_answer\": is_correct_answer,\n \"document_id\": document_id,\n \"model_id\": model_id,\n \"is_correct_document\": is_correct_document,\n \"answer\": answer,\n \"offset_start_in_doc\": offset_start_in_doc,\n }\n response_raw = requests.post(url, json=req).json()\n return response_raw\n\n\ndef upload_doc(file):\n url = f\"{API_ENDPOINT}/{DOC_UPLOAD}\"\n files = [(\"files\", file)]\n response_raw = requests.post(url, files=files).json()\n return response_raw\n", "path": "ui/utils.py"}]}
1,345
174
gh_patches_debug_21955
rasdani/github-patches
git_diff
netbox-community__netbox-15135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tunnel Group is not selectable as column in VPN tunnels list ### Deployment Type Self-hosted ### NetBox Version v3.7.2 ### Python Version 3.11 ### Steps to Reproduce 1. Go to VPN - Tunnels 2. Click Configure Table ### Expected Behavior There is the Tunnel Group column ### Observed Behavior There is no Tunnel Group column that could be shown </issue> <code> [start of netbox/vpn/tables/tunnels.py] 1 import django_tables2 as tables 2 from django.utils.translation import gettext_lazy as _ 3 from django_tables2.utils import Accessor 4 5 from netbox.tables import NetBoxTable, columns 6 from tenancy.tables import TenancyColumnsMixin 7 from vpn.models import * 8 9 __all__ = ( 10 'TunnelTable', 11 'TunnelGroupTable', 12 'TunnelTerminationTable', 13 ) 14 15 16 class TunnelGroupTable(NetBoxTable): 17 name = tables.Column( 18 verbose_name=_('Name'), 19 linkify=True 20 ) 21 tunnel_count = columns.LinkedCountColumn( 22 viewname='vpn:tunnel_list', 23 url_params={'group_id': 'pk'}, 24 verbose_name=_('Tunnels') 25 ) 26 tags = columns.TagColumn( 27 url_name='vpn:tunnelgroup_list' 28 ) 29 30 class Meta(NetBoxTable.Meta): 31 model = TunnelGroup 32 fields = ( 33 'pk', 'id', 'name', 'tunnel_count', 'description', 'slug', 'tags', 'actions', 'created', 'last_updated', 34 ) 35 default_columns = ('pk', 'name', 'tunnel_count', 'description') 36 37 38 class TunnelTable(TenancyColumnsMixin, NetBoxTable): 39 name = tables.Column( 40 verbose_name=_('Name'), 41 linkify=True 42 ) 43 status = columns.ChoiceFieldColumn( 44 verbose_name=_('Status') 45 ) 46 ipsec_profile = tables.Column( 47 verbose_name=_('IPSec profile'), 48 linkify=True 49 ) 50 terminations_count = columns.LinkedCountColumn( 51 accessor=Accessor('count_terminations'), 52 viewname='vpn:tunneltermination_list', 53 url_params={'tunnel_id': 'pk'}, 54 verbose_name=_('Terminations') 55 ) 56 comments = columns.MarkdownColumn( 57 verbose_name=_('Comments'), 58 ) 59 tags = columns.TagColumn( 60 url_name='vpn:tunnel_list' 61 ) 62 63 class Meta(NetBoxTable.Meta): 64 model = Tunnel 65 fields = ( 66 'pk', 'id', 'name', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group', 'tunnel_id', 67 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated', 68 ) 69 default_columns = ('pk', 'name', 'status', 'encapsulation', 'tenant', 'terminations_count') 70 71 72 class TunnelTerminationTable(TenancyColumnsMixin, NetBoxTable): 73 tunnel = tables.Column( 74 verbose_name=_('Tunnel'), 75 linkify=True 76 ) 77 role = columns.ChoiceFieldColumn( 78 verbose_name=_('Role') 79 ) 80 termination_parent = tables.Column( 81 accessor='termination__parent_object', 82 linkify=True, 83 orderable=False, 84 verbose_name=_('Host') 85 ) 86 termination = tables.Column( 87 verbose_name=_('Interface'), 88 linkify=True 89 ) 90 ip_addresses = tables.ManyToManyColumn( 91 accessor=tables.A('termination__ip_addresses'), 92 orderable=False, 93 linkify_item=True, 94 verbose_name=_('IP Addresses') 95 ) 96 outside_ip = tables.Column( 97 verbose_name=_('Outside IP'), 98 linkify=True 99 ) 100 tags = columns.TagColumn( 101 url_name='vpn:tunneltermination_list' 102 ) 103 104 class Meta(NetBoxTable.Meta): 105 model = TunnelTermination 106 fields = ( 107 'pk', 'id', 'tunnel', 'role', 'termination_parent', 'termination', 'ip_addresses', 'outside_ip', 'tags', 108 'created', 'last_updated', 109 ) 110 default_columns = ( 111 'pk', 'id', 'tunnel', 'role', 'termination_parent', 
'termination', 'ip_addresses', 'outside_ip', 112 ) 113 [end of netbox/vpn/tables/tunnels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/vpn/tables/tunnels.py b/netbox/vpn/tables/tunnels.py --- a/netbox/vpn/tables/tunnels.py +++ b/netbox/vpn/tables/tunnels.py @@ -40,6 +40,10 @@ verbose_name=_('Name'), linkify=True ) + group = tables.Column( + verbose_name=_('Group'), + linkify=True + ) status = columns.ChoiceFieldColumn( verbose_name=_('Status') ) @@ -63,10 +67,10 @@ class Meta(NetBoxTable.Meta): model = Tunnel fields = ( - 'pk', 'id', 'name', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group', 'tunnel_id', - 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated', + 'pk', 'id', 'name', 'group', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group', + 'tunnel_id', 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated', ) - default_columns = ('pk', 'name', 'status', 'encapsulation', 'tenant', 'terminations_count') + default_columns = ('pk', 'name', 'group', 'status', 'encapsulation', 'tenant', 'terminations_count') class TunnelTerminationTable(TenancyColumnsMixin, NetBoxTable):
{"golden_diff": "diff --git a/netbox/vpn/tables/tunnels.py b/netbox/vpn/tables/tunnels.py\n--- a/netbox/vpn/tables/tunnels.py\n+++ b/netbox/vpn/tables/tunnels.py\n@@ -40,6 +40,10 @@\n verbose_name=_('Name'),\n linkify=True\n )\n+ group = tables.Column(\n+ verbose_name=_('Group'),\n+ linkify=True\n+ )\n status = columns.ChoiceFieldColumn(\n verbose_name=_('Status')\n )\n@@ -63,10 +67,10 @@\n class Meta(NetBoxTable.Meta):\n model = Tunnel\n fields = (\n- 'pk', 'id', 'name', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group', 'tunnel_id',\n- 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated',\n+ 'pk', 'id', 'name', 'group', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group',\n+ 'tunnel_id', 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated',\n )\n- default_columns = ('pk', 'name', 'status', 'encapsulation', 'tenant', 'terminations_count')\n+ default_columns = ('pk', 'name', 'group', 'status', 'encapsulation', 'tenant', 'terminations_count')\n \n \n class TunnelTerminationTable(TenancyColumnsMixin, NetBoxTable):\n", "issue": "Tunnel Group is not selectable as column in VPN tunnels list\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.2\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Go to VPN - Tunnels\r\n2. Click Configure Table\r\n\n\n### Expected Behavior\n\nThere is the Tunnel Group column\n\n### Observed Behavior\n\nThere is no Tunnel Group column that could be shown\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django.utils.translation import gettext_lazy as _\nfrom django_tables2.utils import Accessor\n\nfrom netbox.tables import NetBoxTable, columns\nfrom tenancy.tables import TenancyColumnsMixin\nfrom vpn.models import *\n\n__all__ = (\n 'TunnelTable',\n 'TunnelGroupTable',\n 'TunnelTerminationTable',\n)\n\n\nclass TunnelGroupTable(NetBoxTable):\n name = tables.Column(\n verbose_name=_('Name'),\n linkify=True\n )\n tunnel_count = columns.LinkedCountColumn(\n viewname='vpn:tunnel_list',\n url_params={'group_id': 'pk'},\n verbose_name=_('Tunnels')\n )\n tags = columns.TagColumn(\n url_name='vpn:tunnelgroup_list'\n )\n\n class Meta(NetBoxTable.Meta):\n model = TunnelGroup\n fields = (\n 'pk', 'id', 'name', 'tunnel_count', 'description', 'slug', 'tags', 'actions', 'created', 'last_updated',\n )\n default_columns = ('pk', 'name', 'tunnel_count', 'description')\n\n\nclass TunnelTable(TenancyColumnsMixin, NetBoxTable):\n name = tables.Column(\n verbose_name=_('Name'),\n linkify=True\n )\n status = columns.ChoiceFieldColumn(\n verbose_name=_('Status')\n )\n ipsec_profile = tables.Column(\n verbose_name=_('IPSec profile'),\n linkify=True\n )\n terminations_count = columns.LinkedCountColumn(\n accessor=Accessor('count_terminations'),\n viewname='vpn:tunneltermination_list',\n url_params={'tunnel_id': 'pk'},\n verbose_name=_('Terminations')\n )\n comments = columns.MarkdownColumn(\n verbose_name=_('Comments'),\n )\n tags = columns.TagColumn(\n url_name='vpn:tunnel_list'\n )\n\n class Meta(NetBoxTable.Meta):\n model = Tunnel\n fields = (\n 'pk', 'id', 'name', 'status', 'encapsulation', 'ipsec_profile', 'tenant', 'tenant_group', 'tunnel_id',\n 'termination_count', 'description', 'comments', 'tags', 'created', 'last_updated',\n )\n default_columns = ('pk', 'name', 'status', 'encapsulation', 'tenant', 'terminations_count')\n\n\nclass TunnelTerminationTable(TenancyColumnsMixin, NetBoxTable):\n tunnel = tables.Column(\n verbose_name=_('Tunnel'),\n 
linkify=True\n )\n role = columns.ChoiceFieldColumn(\n verbose_name=_('Role')\n )\n termination_parent = tables.Column(\n accessor='termination__parent_object',\n linkify=True,\n orderable=False,\n verbose_name=_('Host')\n )\n termination = tables.Column(\n verbose_name=_('Interface'),\n linkify=True\n )\n ip_addresses = tables.ManyToManyColumn(\n accessor=tables.A('termination__ip_addresses'),\n orderable=False,\n linkify_item=True,\n verbose_name=_('IP Addresses')\n )\n outside_ip = tables.Column(\n verbose_name=_('Outside IP'),\n linkify=True\n )\n tags = columns.TagColumn(\n url_name='vpn:tunneltermination_list'\n )\n\n class Meta(NetBoxTable.Meta):\n model = TunnelTermination\n fields = (\n 'pk', 'id', 'tunnel', 'role', 'termination_parent', 'termination', 'ip_addresses', 'outside_ip', 'tags',\n 'created', 'last_updated',\n )\n default_columns = (\n 'pk', 'id', 'tunnel', 'role', 'termination_parent', 'termination', 'ip_addresses', 'outside_ip',\n )\n", "path": "netbox/vpn/tables/tunnels.py"}]}
1,654
341
gh_patches_debug_31423
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-8374
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Wendy's incorrect hours ATP populates incorrect hours for Wendy's. It's populating 10:30 when [the Wendy's website](https://locations.wendys.com/united-states/oh/loveland/10601-loveland-madeira-rd) says 12:00am. Reference [OSM Changeset 151311478](https://www.openstreetmap.org/changeset/151311478#map=6/41.675/-77.593). I assume it has to do with parsing multiple blocks of hours on the same page. In this case, the web page lists restaurant hours and drive through hours, with an extra blurb about breakfast ending at 10:30 am, which I assume is where the 10:30 is coming from in the OSM import. </issue> <code> [start of locations/spiders/wendys.py] 1 from scrapy.spiders import SitemapSpider 2 3 from locations.structured_data_spider import StructuredDataSpider 4 5 6 class WendysSpider(SitemapSpider, StructuredDataSpider): 7 name = "wendys" 8 item_attributes = {"brand": "Wendy's", "brand_wikidata": "Q550258"} 9 sitemap_urls = ["https://locations.wendys.com/sitemap.xml"] 10 sitemap_rules = [(r"https://locations.wendys.com/.+/\w\w/.+/.+", "parse_sd")] 11 [end of locations/spiders/wendys.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py --- a/locations/spiders/wendys.py +++ b/locations/spiders/wendys.py @@ -1,10 +1,48 @@ +import json + from scrapy.spiders import SitemapSpider +from locations.hours import OpeningHours from locations.structured_data_spider import StructuredDataSpider class WendysSpider(SitemapSpider, StructuredDataSpider): name = "wendys" item_attributes = {"brand": "Wendy's", "brand_wikidata": "Q550258"} + wanted_types = ["FastFoodRestaurant"] sitemap_urls = ["https://locations.wendys.com/sitemap.xml"] sitemap_rules = [(r"https://locations.wendys.com/.+/\w\w/.+/.+", "parse_sd")] + + def post_process_item(self, item, response, ld_data, **kwargs): + item["website"] = ld_data.get("url") + + # Opening hours for the drive-through seem to get included with regular hours, so clean that up + opening_hours_divs = response.xpath('//div[@class="c-location-hours-details-wrapper js-location-hours"]') + item["opening_hours"] = self.clean_hours(opening_hours_divs[0]) + + if len(opening_hours_divs) > 1: + item["extras"]["opening_hours:drive_through"] = self.clean_hours(opening_hours_divs[1]) + + if breakfast_hours_divs := response.xpath( + '//div[@class="LocationInfo-breakfastInfo js-breakfastInfo"]/span[@class="c-location-hours-today js-location-hours"]' + ): + item["extras"]["breakfast"] = self.clean_hours(breakfast_hours_divs[0]) + + yield item + + @staticmethod + def clean_hours(hours_div): + days = hours_div.xpath(".//@data-days").extract_first() + days = json.loads(days) + + oh = OpeningHours() + + for day in days: + for interval in day["intervals"]: + # These interval ranges are 24 hour times represented as integers, so they need to be converted to strings + open_time = str(interval["start"]).zfill(4) + close_time = str(interval["end"]).zfill(4) + + oh.add_range(day=day["day"].title()[:2], open_time=open_time, close_time=close_time, time_format="%H%M") + + return oh.as_opening_hours()
{"golden_diff": "diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py\n--- a/locations/spiders/wendys.py\n+++ b/locations/spiders/wendys.py\n@@ -1,10 +1,48 @@\n+import json\n+\n from scrapy.spiders import SitemapSpider\n \n+from locations.hours import OpeningHours\n from locations.structured_data_spider import StructuredDataSpider\n \n \n class WendysSpider(SitemapSpider, StructuredDataSpider):\n name = \"wendys\"\n item_attributes = {\"brand\": \"Wendy's\", \"brand_wikidata\": \"Q550258\"}\n+ wanted_types = [\"FastFoodRestaurant\"]\n sitemap_urls = [\"https://locations.wendys.com/sitemap.xml\"]\n sitemap_rules = [(r\"https://locations.wendys.com/.+/\\w\\w/.+/.+\", \"parse_sd\")]\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ item[\"website\"] = ld_data.get(\"url\")\n+\n+ # Opening hours for the drive-through seem to get included with regular hours, so clean that up\n+ opening_hours_divs = response.xpath('//div[@class=\"c-location-hours-details-wrapper js-location-hours\"]')\n+ item[\"opening_hours\"] = self.clean_hours(opening_hours_divs[0])\n+\n+ if len(opening_hours_divs) > 1:\n+ item[\"extras\"][\"opening_hours:drive_through\"] = self.clean_hours(opening_hours_divs[1])\n+\n+ if breakfast_hours_divs := response.xpath(\n+ '//div[@class=\"LocationInfo-breakfastInfo js-breakfastInfo\"]/span[@class=\"c-location-hours-today js-location-hours\"]'\n+ ):\n+ item[\"extras\"][\"breakfast\"] = self.clean_hours(breakfast_hours_divs[0])\n+\n+ yield item\n+\n+ @staticmethod\n+ def clean_hours(hours_div):\n+ days = hours_div.xpath(\".//@data-days\").extract_first()\n+ days = json.loads(days)\n+\n+ oh = OpeningHours()\n+\n+ for day in days:\n+ for interval in day[\"intervals\"]:\n+ # These interval ranges are 24 hour times represented as integers, so they need to be converted to strings\n+ open_time = str(interval[\"start\"]).zfill(4)\n+ close_time = str(interval[\"end\"]).zfill(4)\n+\n+ oh.add_range(day=day[\"day\"].title()[:2], open_time=open_time, close_time=close_time, time_format=\"%H%M\")\n+\n+ return oh.as_opening_hours()\n", "issue": "Wendy's incorrect hours\nATP populates incorrect hours for Wendy's. It's populating 10:30 when [the Wendy's website](https://locations.wendys.com/united-states/oh/loveland/10601-loveland-madeira-rd) says 12:00am. Reference [OSM Changeset 151311478](https://www.openstreetmap.org/changeset/151311478#map=6/41.675/-77.593). I assume it has to do with parsing multiple blocks of hours on the same page. In this case, the web page lists restaurant hours and drive through hours, with an extra blurb about breakfast ending at 10:30 am, which I assume is where the 10:30 is coming from in the OSM import.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass WendysSpider(SitemapSpider, StructuredDataSpider):\n name = \"wendys\"\n item_attributes = {\"brand\": \"Wendy's\", \"brand_wikidata\": \"Q550258\"}\n sitemap_urls = [\"https://locations.wendys.com/sitemap.xml\"]\n sitemap_rules = [(r\"https://locations.wendys.com/.+/\\w\\w/.+/.+\", \"parse_sd\")]\n", "path": "locations/spiders/wendys.py"}]}
870
569
gh_patches_debug_23365
rasdani/github-patches
git_diff
biopython__biopython-4029
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add option to silence/capture stdout/stderr from Bio.PDB.PSEA Spun from #3980. Running `p-sea` through `Bio.PDB.PSEA` will always print output to standard out. We should refactor the code to use `subprocess.run` and add a keyword to the top-level `run_psea` function to control whether to print or capture the tool's output. A mock implementation that adds some better error handling would be: ```import subprocess def run_psea(pname, verbose=True): cmd = [...] try: p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) except subprocess.CalledProcessError as err: print(f"Command {cmd} failed") raise if verbose: print(p.stdout) ... ``` </issue> <code> [start of Bio/PDB/PSEA.py] 1 # Copyright (C) 2006, Thomas Hamelryck ([email protected]) 2 # 3 # This file is part of the Biopython distribution and governed by your 4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". 5 # Please see the LICENSE file that should have been included as part of this 6 # package. 7 8 """Wrappers for PSEA, a program for secondary structure assignment. 9 10 See this citation for P-SEA, PMID: 9183534 11 12 Labesse G, Colloc'h N, Pothier J, Mornon J-P: P-SEA: a new efficient 13 assignment of secondary structure from C_alpha. 14 Comput Appl Biosci 1997 , 13:291-295 15 16 ftp://ftp.lmcp.jussieu.fr/pub/sincris/software/protein/p-sea/ 17 """ 18 19 import subprocess 20 21 from Bio.PDB.Polypeptide import is_aa 22 23 24 def run_psea(fname): 25 """Run PSEA and return output filename. 26 27 Note that this assumes the P-SEA binary is called "psea" and that it is 28 on the path. 29 30 Note that P-SEA will write an output file in the current directory using 31 the input filename with extension ".sea". 32 33 Note that P-SEA will write output to the terminal while run. 34 """ 35 subprocess.call(["psea", fname]) 36 last = fname.split("/")[-1] 37 base = last.split(".")[0] 38 return base + ".sea" 39 40 41 def psea(pname): 42 """Parse PSEA output file.""" 43 fname = run_psea(pname) 44 start = 0 45 ss = "" 46 with open(fname) as fp: 47 for l in fp: 48 if l[0:6] == ">p-sea": 49 start = 1 50 continue 51 if not start: 52 continue 53 if l[0] == "\n": 54 break 55 ss = ss + l[0:-1] 56 return ss 57 58 59 def psea2HEC(pseq): 60 """Translate PSEA secondary structure string into HEC.""" 61 seq = [] 62 for ss in pseq: 63 if ss == "a": 64 n = "H" 65 elif ss == "b": 66 n = "E" 67 elif ss == "c": 68 n = "C" 69 seq.append(n) 70 return seq 71 72 73 def annotate(m, ss_seq): 74 """Apply secondary structure information to residues in model.""" 75 c = m.get_list()[0] 76 all = c.get_list() 77 residues = [] 78 # Now remove HOH etc. 79 for res in all: 80 if is_aa(res): 81 residues.append(res) 82 L = len(residues) 83 if not L == len(ss_seq): 84 raise ValueError("Length mismatch %i %i" % (L, len(ss_seq))) 85 for i in range(0, L): 86 residues[i].xtra["SS_PSEA"] = ss_seq[i] 87 # subprocess.call(["rm", fname]) 88 89 90 class PSEA: 91 """Define PSEA class. 92 93 PSEA object is a wrapper to PSEA program for secondary structure assignment. 
94 """ 95 96 def __init__(self, model, filename): 97 """Initialize the class.""" 98 ss_seq = psea(filename) 99 ss_seq = psea2HEC(ss_seq) 100 annotate(model, ss_seq) 101 self.ss_seq = ss_seq 102 103 def get_seq(self): 104 """Return secondary structure string.""" 105 return self.ss_seq 106 [end of Bio/PDB/PSEA.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Bio/PDB/PSEA.py b/Bio/PDB/PSEA.py --- a/Bio/PDB/PSEA.py +++ b/Bio/PDB/PSEA.py @@ -17,11 +17,12 @@ """ import subprocess +import os from Bio.PDB.Polypeptide import is_aa -def run_psea(fname): +def run_psea(fname, verbose=False): """Run PSEA and return output filename. Note that this assumes the P-SEA binary is called "psea" and that it is @@ -30,12 +31,22 @@ Note that P-SEA will write an output file in the current directory using the input filename with extension ".sea". - Note that P-SEA will write output to the terminal while run. + Note that P-SEA will not write output to the terminal while run unless + verbose is set to True. """ - subprocess.call(["psea", fname]) last = fname.split("/")[-1] base = last.split(".")[0] - return base + ".sea" + cmd = ["psea", fname] + + p = subprocess.run(cmd, capture_output=True, universal_newlines=True) + + if verbose: + print(p.stdout) + + if not p.stderr.strip() and os.path.exists(base + ".sea"): + return base + ".sea" + else: + raise RuntimeError(f"Error running p-sea: {p.stderr}") def psea(pname):
{"golden_diff": "diff --git a/Bio/PDB/PSEA.py b/Bio/PDB/PSEA.py\n--- a/Bio/PDB/PSEA.py\n+++ b/Bio/PDB/PSEA.py\n@@ -17,11 +17,12 @@\n \"\"\"\n \n import subprocess\n+import os\n \n from Bio.PDB.Polypeptide import is_aa\n \n \n-def run_psea(fname):\n+def run_psea(fname, verbose=False):\n \"\"\"Run PSEA and return output filename.\n \n Note that this assumes the P-SEA binary is called \"psea\" and that it is\n@@ -30,12 +31,22 @@\n Note that P-SEA will write an output file in the current directory using\n the input filename with extension \".sea\".\n \n- Note that P-SEA will write output to the terminal while run.\n+ Note that P-SEA will not write output to the terminal while run unless\n+ verbose is set to True.\n \"\"\"\n- subprocess.call([\"psea\", fname])\n last = fname.split(\"/\")[-1]\n base = last.split(\".\")[0]\n- return base + \".sea\"\n+ cmd = [\"psea\", fname]\n+\n+ p = subprocess.run(cmd, capture_output=True, universal_newlines=True)\n+\n+ if verbose:\n+ print(p.stdout)\n+\n+ if not p.stderr.strip() and os.path.exists(base + \".sea\"):\n+ return base + \".sea\"\n+ else:\n+ raise RuntimeError(f\"Error running p-sea: {p.stderr}\")\n \n \n def psea(pname):\n", "issue": "Add option to silence/capture stdout/stderr from Bio.PDB.PSEA\nSpun from #3980.\r\n\r\nRunning `p-sea` through `Bio.PDB.PSEA` will always print output to standard out. We should refactor the code to use `subprocess.run` and add a keyword to the top-level `run_psea` function to control whether to print or capture the tool's output.\r\n\r\nA mock implementation that adds some better error handling would be:\r\n\r\n```import subprocess\r\n\r\ndef run_psea(pname, verbose=True):\r\n cmd = [...]\r\n try:\r\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)\r\n except subprocess.CalledProcessError as err:\r\n print(f\"Command {cmd} failed\")\r\n raise\r\n\r\n if verbose:\r\n print(p.stdout)\r\n ...\r\n```\n", "before_files": [{"content": "# Copyright (C) 2006, Thomas Hamelryck ([email protected])\n#\n# This file is part of the Biopython distribution and governed by your\n# choice of the \"Biopython License Agreement\" or the \"BSD 3-Clause License\".\n# Please see the LICENSE file that should have been included as part of this\n# package.\n\n\"\"\"Wrappers for PSEA, a program for secondary structure assignment.\n\nSee this citation for P-SEA, PMID: 9183534\n\nLabesse G, Colloc'h N, Pothier J, Mornon J-P: P-SEA: a new efficient\nassignment of secondary structure from C_alpha.\nComput Appl Biosci 1997 , 13:291-295\n\nftp://ftp.lmcp.jussieu.fr/pub/sincris/software/protein/p-sea/\n\"\"\"\n\nimport subprocess\n\nfrom Bio.PDB.Polypeptide import is_aa\n\n\ndef run_psea(fname):\n \"\"\"Run PSEA and return output filename.\n\n Note that this assumes the P-SEA binary is called \"psea\" and that it is\n on the path.\n\n Note that P-SEA will write an output file in the current directory using\n the input filename with extension \".sea\".\n\n Note that P-SEA will write output to the terminal while run.\n \"\"\"\n subprocess.call([\"psea\", fname])\n last = fname.split(\"/\")[-1]\n base = last.split(\".\")[0]\n return base + \".sea\"\n\n\ndef psea(pname):\n \"\"\"Parse PSEA output file.\"\"\"\n fname = run_psea(pname)\n start = 0\n ss = \"\"\n with open(fname) as fp:\n for l in fp:\n if l[0:6] == \">p-sea\":\n start = 1\n continue\n if not start:\n continue\n if l[0] == \"\\n\":\n break\n ss = ss + l[0:-1]\n return ss\n\n\ndef psea2HEC(pseq):\n \"\"\"Translate PSEA secondary structure string into 
HEC.\"\"\"\n seq = []\n for ss in pseq:\n if ss == \"a\":\n n = \"H\"\n elif ss == \"b\":\n n = \"E\"\n elif ss == \"c\":\n n = \"C\"\n seq.append(n)\n return seq\n\n\ndef annotate(m, ss_seq):\n \"\"\"Apply secondary structure information to residues in model.\"\"\"\n c = m.get_list()[0]\n all = c.get_list()\n residues = []\n # Now remove HOH etc.\n for res in all:\n if is_aa(res):\n residues.append(res)\n L = len(residues)\n if not L == len(ss_seq):\n raise ValueError(\"Length mismatch %i %i\" % (L, len(ss_seq)))\n for i in range(0, L):\n residues[i].xtra[\"SS_PSEA\"] = ss_seq[i]\n # subprocess.call([\"rm\", fname])\n\n\nclass PSEA:\n \"\"\"Define PSEA class.\n\n PSEA object is a wrapper to PSEA program for secondary structure assignment.\n \"\"\"\n\n def __init__(self, model, filename):\n \"\"\"Initialize the class.\"\"\"\n ss_seq = psea(filename)\n ss_seq = psea2HEC(ss_seq)\n annotate(model, ss_seq)\n self.ss_seq = ss_seq\n\n def get_seq(self):\n \"\"\"Return secondary structure string.\"\"\"\n return self.ss_seq\n", "path": "Bio/PDB/PSEA.py"}]}
1,693
339
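Note on gh_patches_debug_23365 (Bio.PDB.PSEA): the golden diff swaps a fire-and-forget `subprocess.call` for a captured `subprocess.run` gated by a `verbose` flag. The reusable pattern, stripped of the P-SEA specifics — function name and error handling here are illustrative, not the patched code:

```python
import subprocess

def run_captured(cmd, verbose=False):
    # Capture stdout/stderr instead of letting the child write to the
    # terminal; echo the output only when the caller asks for it.
    p = subprocess.run(cmd, capture_output=True, universal_newlines=True)
    if verbose:
        print(p.stdout)
    if p.returncode != 0 or p.stderr.strip():
        raise RuntimeError(f"{cmd[0]} failed: {p.stderr}")
    return p.stdout
```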
gh_patches_debug_2944
rasdani/github-patches
git_diff
ivy-llc__ivy-14663
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix generating_index_arrays.test_numpy_diag_indices | | | |---|---| |paddle|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a> |tensorflow|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a> |torch|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a> |numpy|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a> |jax|<a href="https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582"><img src=https://img.shields.io/badge/-failure-red></a> </issue> <code> [start of ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py] 1 import ivy 2 from ivy.functional.frontends.numpy.func_wrapper import ( 3 to_ivy_arrays_and_back, 4 outputs_to_numpy_arrays, 5 ) 6 7 8 @to_ivy_arrays_and_back 9 def indices(dimensions, dtype=int, sparse=False): 10 dimensions = tuple(dimensions) 11 N = len(dimensions) 12 shape = (1,) * N 13 if sparse: 14 res = tuple() 15 else: 16 res = ivy.empty((N,) + dimensions, dtype=dtype) 17 for i, dim in enumerate(dimensions): 18 idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :]) 19 if sparse: 20 res = res + (idx,) 21 else: 22 res[i] = idx 23 return res 24 25 26 # unravel_index 27 @to_ivy_arrays_and_back 28 def unravel_index(indices, shape, order="C"): 29 ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)] 30 return tuple(ret) 31 32 33 @outputs_to_numpy_arrays 34 def diag_indices(n, ndim=2): 35 idx = ivy.arange(n, dtype=int) 36 return (idx,) * ndim 37 38 39 @to_ivy_arrays_and_back 40 def tril_indices(n, k=0, m=None): 41 return ivy.tril_indices(n, m, k) 42 [end of ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py --- a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py +++ b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py @@ -30,10 +30,12 @@ return tuple(ret) -@outputs_to_numpy_arrays +@to_ivy_arrays_and_back def diag_indices(n, ndim=2): - idx = ivy.arange(n, dtype=int) - return (idx,) * ndim + idx = ivy.arange(n) + res = ivy.array((idx,) * ndim) + res = tuple(res.astype("int64")) + return res @to_ivy_arrays_and_back
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py\n--- a/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py\n+++ b/ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py\n@@ -30,10 +30,12 @@\n return tuple(ret)\n \n \n-@outputs_to_numpy_arrays\n+@to_ivy_arrays_and_back\n def diag_indices(n, ndim=2):\n- idx = ivy.arange(n, dtype=int)\n- return (idx,) * ndim\n+ idx = ivy.arange(n)\n+ res = ivy.array((idx,) * ndim)\n+ res = tuple(res.astype(\"int64\"))\n+ return res\n \n \n @to_ivy_arrays_and_back\n", "issue": "Fix generating_index_arrays.test_numpy_diag_indices\n| | |\r\n|---|---|\r\n|paddle|<a href=\"https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582\"><img src=https://img.shields.io/badge/-failure-red></a>\r\n|tensorflow|<a href=\"https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582\"><img src=https://img.shields.io/badge/-failure-red></a>\r\n|torch|<a href=\"https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582\"><img src=https://img.shields.io/badge/-failure-red></a>\r\n|numpy|<a href=\"https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582\"><img src=https://img.shields.io/badge/-failure-red></a>\r\n|jax|<a href=\"https://github.com/unifyai/ivy/actions/runs/6413197943/job/17411744582\"><img src=https://img.shields.io/badge/-failure-red></a>\r\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n outputs_to_numpy_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef indices(dimensions, dtype=int, sparse=False):\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = tuple()\n else:\n res = ivy.empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :])\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape, order=\"C\"):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n\n\n@outputs_to_numpy_arrays\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n, k=0, m=None):\n return ivy.tril_indices(n, m, k)\n", "path": "ivy/functional/frontends/numpy/indexing_routines/generating_index_arrays.py"}]}
1,252
198
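Note on gh_patches_debug_2944 (ivy `diag_indices`): the behaviour the frontend must reproduce is NumPy's own — a tuple of `ndim` identical index arrays — hence the explicit `.astype("int64")` in the fix. An illustrative check, not a test from the repo:

```python
import numpy as np

rows, cols = np.diag_indices(3)   # default ndim=2
assert (rows == cols).all()
assert list(rows) == [0, 1, 2]
# the failing frontend tests expect int64 output, which is why the patch
# casts explicitly rather than relying on the platform's default int dtype
```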
gh_patches_debug_38909
rasdani/github-patches
git_diff
bridgecrewio__checkov-2330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BC_AWS_PUBLIC_1 does not take Conditions into account **Describe the bug** BC_AWS_PUBLIC_1 seems to only look for the principal in a ECR Resource Policy. If you want to control access to a repo based on the pricipals AWS Organization ID, you have to do this using a condition instead of a Principal. It seems like checkov does not take this into account. Example: ``` yaml Repository: Type: AWS::ECR::Repository Properties: RepositoryName: !Ref RepoName RepositoryPolicyText: Version: "2012-10-17" Statement: - Sid: OrgRead Effect: Allow Principal: "*" Action: - ecr:GetAuthorizationToken - ecr:BatchCheckLayerAvailability - ecr:GetDownloadUrlForLayer - ecr:GetRepositoryPolicy - ecr:DescribeRepositories - ecr:ListImages - ecr:DescribeImages - ecr:BatchGetImage - ecr:DescribeImageScanFindings Condition: ForAllValues:StringEquals: aws:PrincipalOrgID: "o-12345678" ``` **To Reproduce** Create the above mentioned resource in cloudformation and scan it with checkov **Expected behavior** I expect checkov to look for the usage of a PrincipalOrgID or similar global condition keys and not fail the check when they are used to make the repository non public. **Desktop (please complete the following information):** - OS: macOS 11.5.2 - Checkov Version 2.0.390 </issue> <code> [start of checkov/terraform/checks/resource/aws/ECRPolicy.py] 1 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 4 from typing import List 5 6 7 class ECRPolicy(BaseResourceCheck): 8 def __init__(self): 9 name = "Ensure ECR policy is not set to public" 10 id = "CKV_AWS_32" 11 supported_resources = ['aws_ecr_repository_policy'] 12 categories = [CheckCategories.GENERAL_SECURITY] 13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 14 15 def scan_resource_conf(self, conf): 16 """ 17 Looks for public * policy for ecr repository: 18 https://www.terraform.io/docs/providers/aws/r/ecr_repository_policy.html 19 :param conf: aws_ecr_repository configuration 20 :return: <CheckResult> 21 """ 22 if "policy" in conf.keys(): 23 policy = conf["policy"][0] 24 if type(policy) is str: 25 return CheckResult.PASSED 26 if policy['Statement'][0] and type(policy['Statement'][0]) is dict: 27 statement = policy['Statement'][0] 28 if statement['Principal'] and type(statement['Principal']) is str: 29 principal = statement['Principal'] 30 if principal == "*": 31 self.evaluated_keys = ["policy/Statement/Principal"] 32 return CheckResult.FAILED 33 return CheckResult.PASSED 34 35 def get_evaluated_keys(self) -> List[str]: 36 return ['policy'] 37 38 39 check = ECRPolicy() 40 [end of checkov/terraform/checks/resource/aws/ECRPolicy.py] [start of checkov/cloudformation/checks/resource/aws/ECRPolicy.py] 1 import json 2 from typing import List 3 4 from checkov.common.parsers.node import StrNode 5 from checkov.common.models.enums import CheckResult, CheckCategories 6 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck 7 8 9 class ECRPolicy(BaseResourceCheck): 10 def __init__(self): 11 name = "Ensure ECR policy is not set to public" 12 id = "CKV_AWS_32" 13 supported_resources = ['AWS::ECR::Repository'] 14 categories = [CheckCategories.GENERAL_SECURITY] 15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 16 17 def 
scan_resource_conf(self, conf): 18 """ 19 Looks for public * policy for ecr repository: 20 https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html 21 :param conf: aws_ecr_repository configuration 22 :return: <CheckResult> 23 """ 24 self.evaluated_keys = ["Properties/RepositoryPolicyText/Statement"] 25 if 'Properties' in conf.keys(): 26 if 'RepositoryPolicyText' in conf['Properties'].keys(): 27 policy_text = conf['Properties']['RepositoryPolicyText'] 28 if type(policy_text) in (str, StrNode): 29 policy_text = json.loads(str(policy_text)) 30 if 'Statement' in policy_text.keys(): 31 for statement_index, statement in enumerate(policy_text['Statement']): 32 if 'Principal' in statement.keys(): 33 for principal_index, principal in enumerate(statement['Principal']): 34 if principal == "*": 35 self.evaluated_keys = [f"Properties/RepositoryPolicyText/Statement/[{statement_index}]/Principal/[{principal_index}]"] 36 return CheckResult.FAILED 37 return CheckResult.PASSED 38 39 check = ECRPolicy() 40 [end of checkov/cloudformation/checks/resource/aws/ECRPolicy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/cloudformation/checks/resource/aws/ECRPolicy.py b/checkov/cloudformation/checks/resource/aws/ECRPolicy.py --- a/checkov/cloudformation/checks/resource/aws/ECRPolicy.py +++ b/checkov/cloudformation/checks/resource/aws/ECRPolicy.py @@ -31,9 +31,22 @@ for statement_index, statement in enumerate(policy_text['Statement']): if 'Principal' in statement.keys(): for principal_index, principal in enumerate(statement['Principal']): - if principal == "*": + if principal == "*" and not self.check_for_constrained_condition(statement): self.evaluated_keys = [f"Properties/RepositoryPolicyText/Statement/[{statement_index}]/Principal/[{principal_index}]"] return CheckResult.FAILED return CheckResult.PASSED + def check_for_constrained_condition(self, statement): + """ + Checks to see if there is a constraint on a a wildcarded principal + :param statement: statement from aws_repository_configuration + :return: true if there is a constraint + """ + if 'Condition' in statement.keys(): + condition = statement['Condition'] + if 'ForAllValues:StringEquals' in condition.keys(): + if 'aws:PrincipalOrgID' in condition['ForAllValues:StringEquals'].keys(): + return True + return False + check = ECRPolicy() diff --git a/checkov/terraform/checks/resource/aws/ECRPolicy.py b/checkov/terraform/checks/resource/aws/ECRPolicy.py --- a/checkov/terraform/checks/resource/aws/ECRPolicy.py +++ b/checkov/terraform/checks/resource/aws/ECRPolicy.py @@ -27,7 +27,7 @@ statement = policy['Statement'][0] if statement['Principal'] and type(statement['Principal']) is str: principal = statement['Principal'] - if principal == "*": + if principal == "*" and not self.check_for_constrained_condition(statement): self.evaluated_keys = ["policy/Statement/Principal"] return CheckResult.FAILED return CheckResult.PASSED @@ -35,5 +35,17 @@ def get_evaluated_keys(self) -> List[str]: return ['policy'] + def check_for_constrained_condition(self, statement): + """ + Checks to see if there is a constraint on a a wildcarded principal + :param statement: statement from aws_repository_configuration + :return: True if there is a constraint + """ + if 'Condition' in statement and isinstance(statement['Condition'], dict): + condition = statement['Condition'] + if 'ForAllValues:StringEquals' in condition and isinstance(condition['ForAllValues:StringEquals'], dict): + if 'aws:PrincipalOrgID' in condition['ForAllValues:StringEquals']: + return True + return False check = ECRPolicy()
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/ECRPolicy.py b/checkov/cloudformation/checks/resource/aws/ECRPolicy.py\n--- a/checkov/cloudformation/checks/resource/aws/ECRPolicy.py\n+++ b/checkov/cloudformation/checks/resource/aws/ECRPolicy.py\n@@ -31,9 +31,22 @@\n for statement_index, statement in enumerate(policy_text['Statement']):\n if 'Principal' in statement.keys():\n for principal_index, principal in enumerate(statement['Principal']):\n- if principal == \"*\":\n+ if principal == \"*\" and not self.check_for_constrained_condition(statement):\n self.evaluated_keys = [f\"Properties/RepositoryPolicyText/Statement/[{statement_index}]/Principal/[{principal_index}]\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n \n+ def check_for_constrained_condition(self, statement):\n+ \"\"\"\n+ Checks to see if there is a constraint on a a wildcarded principal\n+ :param statement: statement from aws_repository_configuration\n+ :return: true if there is a constraint\n+ \"\"\"\n+ if 'Condition' in statement.keys():\n+ condition = statement['Condition']\n+ if 'ForAllValues:StringEquals' in condition.keys():\n+ if 'aws:PrincipalOrgID' in condition['ForAllValues:StringEquals'].keys():\n+ return True\n+ return False\n+\n check = ECRPolicy()\ndiff --git a/checkov/terraform/checks/resource/aws/ECRPolicy.py b/checkov/terraform/checks/resource/aws/ECRPolicy.py\n--- a/checkov/terraform/checks/resource/aws/ECRPolicy.py\n+++ b/checkov/terraform/checks/resource/aws/ECRPolicy.py\n@@ -27,7 +27,7 @@\n statement = policy['Statement'][0]\n if statement['Principal'] and type(statement['Principal']) is str:\n principal = statement['Principal']\n- if principal == \"*\":\n+ if principal == \"*\" and not self.check_for_constrained_condition(statement):\n self.evaluated_keys = [\"policy/Statement/Principal\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n@@ -35,5 +35,17 @@\n def get_evaluated_keys(self) -> List[str]:\n return ['policy']\n \n+ def check_for_constrained_condition(self, statement):\n+ \"\"\"\n+ Checks to see if there is a constraint on a a wildcarded principal\n+ :param statement: statement from aws_repository_configuration\n+ :return: True if there is a constraint\n+ \"\"\"\n+ if 'Condition' in statement and isinstance(statement['Condition'], dict):\n+ condition = statement['Condition']\n+ if 'ForAllValues:StringEquals' in condition and isinstance(condition['ForAllValues:StringEquals'], dict):\n+ if 'aws:PrincipalOrgID' in condition['ForAllValues:StringEquals']:\n+ return True\n+ return False\n \n check = ECRPolicy()\n", "issue": "BC_AWS_PUBLIC_1 does not take Conditions into account\n**Describe the bug**\r\nBC_AWS_PUBLIC_1 seems to only look for the principal in a ECR Resource Policy. If you want to control access to a repo based on the pricipals AWS Organization ID, you have to do this using a condition instead of a Principal. 
It seems like checkov does not take this into account.\r\n\r\nExample:\r\n\r\n``` yaml\r\n Repository:\r\n Type: AWS::ECR::Repository\r\n Properties:\r\n RepositoryName: !Ref RepoName\r\n RepositoryPolicyText:\r\n Version: \"2012-10-17\"\r\n Statement:\r\n - Sid: OrgRead\r\n Effect: Allow\r\n Principal: \"*\"\r\n Action:\r\n - ecr:GetAuthorizationToken\r\n - ecr:BatchCheckLayerAvailability\r\n - ecr:GetDownloadUrlForLayer\r\n - ecr:GetRepositoryPolicy\r\n - ecr:DescribeRepositories\r\n - ecr:ListImages\r\n - ecr:DescribeImages\r\n - ecr:BatchGetImage\r\n - ecr:DescribeImageScanFindings\r\n Condition:\r\n ForAllValues:StringEquals:\r\n aws:PrincipalOrgID: \"o-12345678\"\r\n``` \r\n\r\n**To Reproduce**\r\nCreate the above mentioned resource in cloudformation and scan it with checkov\r\n\r\n**Expected behavior**\r\nI expect checkov to look for the usage of a PrincipalOrgID or similar global condition keys and not fail the check when they are used to make the repository non public.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS 11.5.2\r\n - Checkov Version 2.0.390\r\n\n", "before_files": [{"content": "\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom typing import List\n\n\nclass ECRPolicy(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure ECR policy is not set to public\"\n id = \"CKV_AWS_32\"\n supported_resources = ['aws_ecr_repository_policy']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for public * policy for ecr repository:\n https://www.terraform.io/docs/providers/aws/r/ecr_repository_policy.html\n :param conf: aws_ecr_repository configuration\n :return: <CheckResult>\n \"\"\"\n if \"policy\" in conf.keys():\n policy = conf[\"policy\"][0]\n if type(policy) is str:\n return CheckResult.PASSED\n if policy['Statement'][0] and type(policy['Statement'][0]) is dict:\n statement = policy['Statement'][0]\n if statement['Principal'] and type(statement['Principal']) is str:\n principal = statement['Principal']\n if principal == \"*\":\n self.evaluated_keys = [\"policy/Statement/Principal\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['policy']\n\n\ncheck = ECRPolicy()\n", "path": "checkov/terraform/checks/resource/aws/ECRPolicy.py"}, {"content": "import json\nfrom typing import List\n\nfrom checkov.common.parsers.node import StrNode\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass ECRPolicy(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure ECR policy is not set to public\"\n id = \"CKV_AWS_32\"\n supported_resources = ['AWS::ECR::Repository']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for public * policy for ecr repository:\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html\n :param conf: aws_ecr_repository configuration\n :return: <CheckResult>\n \"\"\"\n self.evaluated_keys = [\"Properties/RepositoryPolicyText/Statement\"]\n if 'Properties' in conf.keys():\n if 
'RepositoryPolicyText' in conf['Properties'].keys():\n policy_text = conf['Properties']['RepositoryPolicyText']\n if type(policy_text) in (str, StrNode):\n policy_text = json.loads(str(policy_text))\n if 'Statement' in policy_text.keys():\n for statement_index, statement in enumerate(policy_text['Statement']):\n if 'Principal' in statement.keys():\n for principal_index, principal in enumerate(statement['Principal']):\n if principal == \"*\":\n self.evaluated_keys = [f\"Properties/RepositoryPolicyText/Statement/[{statement_index}]/Principal/[{principal_index}]\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n\ncheck = ECRPolicy()\n", "path": "checkov/cloudformation/checks/resource/aws/ECRPolicy.py"}]}
1,792
642
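Note on gh_patches_debug_38909 (checkov ECR policy): distilled from the golden diff, the new guard lets a wildcard principal pass only when the statement is fenced by an `aws:PrincipalOrgID` condition. A standalone sketch of that predicate — the helper name is ours:

```python
def is_org_constrained(statement: dict) -> bool:
    condition = statement.get("Condition")
    if not isinstance(condition, dict):
        return False
    equals = condition.get("ForAllValues:StringEquals")
    return isinstance(equals, dict) and "aws:PrincipalOrgID" in equals

constrained = {
    "Effect": "Allow",
    "Principal": "*",
    "Condition": {"ForAllValues:StringEquals": {"aws:PrincipalOrgID": "o-12345678"}},
}
assert is_org_constrained(constrained)             # "*" tolerated here
assert not is_org_constrained({"Principal": "*"})  # bare wildcard still fails
```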
gh_patches_debug_88
rasdani/github-patches
git_diff
watchdogpolska__small_eod-479
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tagi Stworzenie ekranu Tagi tabela zawiera kolumnę * tag (pole `name` w serializatorze) Ekran paginowy wielkość strony 20 Ekran dostępny z menu bocznego </issue> <code> [start of backend-project/small_eod/tags/serializers.py] 1 from rest_framework import serializers 2 from .models import Tag 3 4 5 class TagSerializer(serializers.ModelSerializer): 6 class Meta: 7 model = Tag 8 fields = [ 9 "name", 10 ] 11 [end of backend-project/small_eod/tags/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py --- a/backend-project/small_eod/tags/serializers.py +++ b/backend-project/small_eod/tags/serializers.py @@ -7,4 +7,5 @@ model = Tag fields = [ "name", + "id", ]
{"golden_diff": "diff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py\n--- a/backend-project/small_eod/tags/serializers.py\n+++ b/backend-project/small_eod/tags/serializers.py\n@@ -7,4 +7,5 @@\n model = Tag\n fields = [\n \"name\",\n+ \"id\",\n ]\n", "issue": "Tagi\nStworzenie ekranu Tagi\r\n\r\ntabela zawiera kolumn\u0119 \r\n* tag (pole `name` w serializatorze)\r\n\r\nEkran paginowy wielko\u015b\u0107 strony 20\r\nEkran dost\u0119pny z menu bocznego\n", "before_files": [{"content": "from rest_framework import serializers\nfrom .models import Tag\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = [\n \"name\",\n ]\n", "path": "backend-project/small_eod/tags/serializers.py"}]}
655
83
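Note on gh_patches_debug_88 (small_eod tags — the issue, in Polish, asks for a paginated Tags screen, page size 20, reachable from the side menu, with a table column bound to the serializer's `name` field): the fix is a single added field. The serializer as it reads after the golden diff, with a hedged guess at the rationale in the comment:

```python
from rest_framework import serializers
from .models import Tag


class TagSerializer(serializers.ModelSerializer):
    class Meta:
        model = Tag
        fields = [
            "name",
            "id",  # presumably so the frontend table can key its rows
        ]
```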
gh_patches_debug_37653
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-2149
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Update timerbot example https://github.com/python-telegram-bot/python-telegram-bot/blob/3b4559dd95d14a4c9a9ca54237b5fe547386b666/examples/timerbot.py#L56-L58 If the job has already run, those lines give an error after v13.0, as APS remvos the job after it has run. But as jobs can't be persisted natively, I think we should just restructure the example to give the jobs the `chat_id` as name and use `get_jobs_by_name` to remove them if necessary Edit: And we should definetely remove the `pass_*` kwargs, those are deprecated since v12 … </issue> <code> [start of examples/timerbot.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # This program is dedicated to the public domain under the CC0 license. 4 5 """ 6 Simple Bot to send timed Telegram messages. 7 8 This Bot uses the Updater class to handle the bot and the JobQueue to send 9 timed messages. 10 11 First, a few handler functions are defined. Then, those functions are passed to 12 the Dispatcher and registered at their respective places. 13 Then, the bot is started and runs until we press Ctrl-C on the command line. 14 15 Usage: 16 Basic Alarm Bot example, sends a message after a set time. 17 Press Ctrl-C on the command line or send a signal to the process to stop the 18 bot. 19 """ 20 21 import logging 22 23 from telegram.ext import Updater, CommandHandler 24 25 # Enable logging 26 logging.basicConfig( 27 format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO 28 ) 29 30 logger = logging.getLogger(__name__) 31 32 33 # Define a few command handlers. These usually take the two arguments update and 34 # context. Error handlers also receive the raised TelegramError object in error. 35 def start(update, context): 36 update.message.reply_text('Hi! Use /set <seconds> to set a timer') 37 38 39 def alarm(context): 40 """Send the alarm message.""" 41 job = context.job 42 context.bot.send_message(job.context, text='Beep!') 43 44 45 def set_timer(update, context): 46 """Add a job to the queue.""" 47 chat_id = update.message.chat_id 48 try: 49 # args[0] should contain the time for the timer in seconds 50 due = int(context.args[0]) 51 if due < 0: 52 update.message.reply_text('Sorry we can not go back to future!') 53 return 54 55 # Add job to queue and stop current one if there is a timer already 56 if 'job' in context.chat_data: 57 old_job = context.chat_data['job'] 58 old_job.schedule_removal() 59 new_job = context.job_queue.run_once(alarm, due, context=chat_id) 60 context.chat_data['job'] = new_job 61 62 update.message.reply_text('Timer successfully set!') 63 64 except (IndexError, ValueError): 65 update.message.reply_text('Usage: /set <seconds>') 66 67 68 def unset(update, context): 69 """Remove the job if the user changed their mind.""" 70 if 'job' not in context.chat_data: 71 update.message.reply_text('You have no active timer') 72 return 73 74 job = context.chat_data['job'] 75 job.schedule_removal() 76 del context.chat_data['job'] 77 78 update.message.reply_text('Timer successfully unset!') 79 80 81 def main(): 82 """Run bot.""" 83 # Create the Updater and pass it your bot's token. 
84 # Make sure to set use_context=True to use the new context based callbacks 85 # Post version 12 this will no longer be necessary 86 updater = Updater("TOKEN", use_context=True) 87 88 # Get the dispatcher to register handlers 89 dp = updater.dispatcher 90 91 # on different commands - answer in Telegram 92 dp.add_handler(CommandHandler("start", start)) 93 dp.add_handler(CommandHandler("help", start)) 94 dp.add_handler( 95 CommandHandler("set", set_timer, pass_args=True, pass_job_queue=True, pass_chat_data=True) 96 ) 97 dp.add_handler(CommandHandler("unset", unset, pass_chat_data=True)) 98 99 # Start the Bot 100 updater.start_polling() 101 102 # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or 103 # SIGABRT. This should be used most of the time, since start_polling() is 104 # non-blocking and will stop the bot gracefully. 105 updater.idle() 106 107 108 if __name__ == '__main__': 109 main() 110 [end of examples/timerbot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/timerbot.py b/examples/timerbot.py --- a/examples/timerbot.py +++ b/examples/timerbot.py @@ -42,6 +42,16 @@ context.bot.send_message(job.context, text='Beep!') +def remove_job_if_exists(name, context): + """Remove job with given name. Returns whether job was removed.""" + current_jobs = context.job_queue.get_jobs_by_name(name) + if not current_jobs: + return False + for job in current_jobs: + job.schedule_removal() + return True + + def set_timer(update, context): """Add a job to the queue.""" chat_id = update.message.chat_id @@ -52,14 +62,13 @@ update.message.reply_text('Sorry we can not go back to future!') return - # Add job to queue and stop current one if there is a timer already - if 'job' in context.chat_data: - old_job = context.chat_data['job'] - old_job.schedule_removal() - new_job = context.job_queue.run_once(alarm, due, context=chat_id) - context.chat_data['job'] = new_job + job_removed = remove_job_if_exists(str(chat_id), context) + context.job_queue.run_once(alarm, due, context=chat_id, name=str(chat_id)) - update.message.reply_text('Timer successfully set!') + text = 'Timer successfully set!' + if job_removed: + text += ' Old one was removed.' + update.message.reply_text(text) except (IndexError, ValueError): update.message.reply_text('Usage: /set <seconds>') @@ -67,15 +76,10 @@ def unset(update, context): """Remove the job if the user changed their mind.""" - if 'job' not in context.chat_data: - update.message.reply_text('You have no active timer') - return - - job = context.chat_data['job'] - job.schedule_removal() - del context.chat_data['job'] - - update.message.reply_text('Timer successfully unset!') + chat_id = update.message.chat_id + job_removed = remove_job_if_exists(str(chat_id), context) + text = 'Timer successfully cancelled!' if job_removed else 'You have no active timer.' + update.message.reply_text(text) def main(): @@ -91,10 +95,8 @@ # on different commands - answer in Telegram dp.add_handler(CommandHandler("start", start)) dp.add_handler(CommandHandler("help", start)) - dp.add_handler( - CommandHandler("set", set_timer, pass_args=True, pass_job_queue=True, pass_chat_data=True) - ) - dp.add_handler(CommandHandler("unset", unset, pass_chat_data=True)) + dp.add_handler(CommandHandler("set", set_timer)) + dp.add_handler(CommandHandler("unset", unset)) # Start the Bot updater.start_polling()
{"golden_diff": "diff --git a/examples/timerbot.py b/examples/timerbot.py\n--- a/examples/timerbot.py\n+++ b/examples/timerbot.py\n@@ -42,6 +42,16 @@\n context.bot.send_message(job.context, text='Beep!')\n \n \n+def remove_job_if_exists(name, context):\n+ \"\"\"Remove job with given name. Returns whether job was removed.\"\"\"\n+ current_jobs = context.job_queue.get_jobs_by_name(name)\n+ if not current_jobs:\n+ return False\n+ for job in current_jobs:\n+ job.schedule_removal()\n+ return True\n+\n+\n def set_timer(update, context):\n \"\"\"Add a job to the queue.\"\"\"\n chat_id = update.message.chat_id\n@@ -52,14 +62,13 @@\n update.message.reply_text('Sorry we can not go back to future!')\n return\n \n- # Add job to queue and stop current one if there is a timer already\n- if 'job' in context.chat_data:\n- old_job = context.chat_data['job']\n- old_job.schedule_removal()\n- new_job = context.job_queue.run_once(alarm, due, context=chat_id)\n- context.chat_data['job'] = new_job\n+ job_removed = remove_job_if_exists(str(chat_id), context)\n+ context.job_queue.run_once(alarm, due, context=chat_id, name=str(chat_id))\n \n- update.message.reply_text('Timer successfully set!')\n+ text = 'Timer successfully set!'\n+ if job_removed:\n+ text += ' Old one was removed.'\n+ update.message.reply_text(text)\n \n except (IndexError, ValueError):\n update.message.reply_text('Usage: /set <seconds>')\n@@ -67,15 +76,10 @@\n \n def unset(update, context):\n \"\"\"Remove the job if the user changed their mind.\"\"\"\n- if 'job' not in context.chat_data:\n- update.message.reply_text('You have no active timer')\n- return\n-\n- job = context.chat_data['job']\n- job.schedule_removal()\n- del context.chat_data['job']\n-\n- update.message.reply_text('Timer successfully unset!')\n+ chat_id = update.message.chat_id\n+ job_removed = remove_job_if_exists(str(chat_id), context)\n+ text = 'Timer successfully cancelled!' if job_removed else 'You have no active timer.'\n+ update.message.reply_text(text)\n \n \n def main():\n@@ -91,10 +95,8 @@\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", start))\n- dp.add_handler(\n- CommandHandler(\"set\", set_timer, pass_args=True, pass_job_queue=True, pass_chat_data=True)\n- )\n- dp.add_handler(CommandHandler(\"unset\", unset, pass_chat_data=True))\n+ dp.add_handler(CommandHandler(\"set\", set_timer))\n+ dp.add_handler(CommandHandler(\"unset\", unset))\n \n # Start the Bot\n updater.start_polling()\n", "issue": "[BUG] Update timerbot example\nhttps://github.com/python-telegram-bot/python-telegram-bot/blob/3b4559dd95d14a4c9a9ca54237b5fe547386b666/examples/timerbot.py#L56-L58\r\n\r\nIf the job has already run, those lines give an error after v13.0, as APS remvos the job after it has run. But as jobs can't be persisted natively, I think we should just restructure the example to give the jobs the `chat_id` as name and use `get_jobs_by_name` to remove them if necessary\r\n\r\nEdit: And we should definetely remove the `pass_*` kwargs, those are deprecated since v12 \u2026\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This program is dedicated to the public domain under the CC0 license.\n\n\"\"\"\nSimple Bot to send timed Telegram messages.\n\nThis Bot uses the Updater class to handle the bot and the JobQueue to send\ntimed messages.\n\nFirst, a few handler functions are defined. 
Then, those functions are passed to\nthe Dispatcher and registered at their respective places.\nThen, the bot is started and runs until we press Ctrl-C on the command line.\n\nUsage:\nBasic Alarm Bot example, sends a message after a set time.\nPress Ctrl-C on the command line or send a signal to the process to stop the\nbot.\n\"\"\"\n\nimport logging\n\nfrom telegram.ext import Updater, CommandHandler\n\n# Enable logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO\n)\n\nlogger = logging.getLogger(__name__)\n\n\n# Define a few command handlers. These usually take the two arguments update and\n# context. Error handlers also receive the raised TelegramError object in error.\ndef start(update, context):\n update.message.reply_text('Hi! Use /set <seconds> to set a timer')\n\n\ndef alarm(context):\n \"\"\"Send the alarm message.\"\"\"\n job = context.job\n context.bot.send_message(job.context, text='Beep!')\n\n\ndef set_timer(update, context):\n \"\"\"Add a job to the queue.\"\"\"\n chat_id = update.message.chat_id\n try:\n # args[0] should contain the time for the timer in seconds\n due = int(context.args[0])\n if due < 0:\n update.message.reply_text('Sorry we can not go back to future!')\n return\n\n # Add job to queue and stop current one if there is a timer already\n if 'job' in context.chat_data:\n old_job = context.chat_data['job']\n old_job.schedule_removal()\n new_job = context.job_queue.run_once(alarm, due, context=chat_id)\n context.chat_data['job'] = new_job\n\n update.message.reply_text('Timer successfully set!')\n\n except (IndexError, ValueError):\n update.message.reply_text('Usage: /set <seconds>')\n\n\ndef unset(update, context):\n \"\"\"Remove the job if the user changed their mind.\"\"\"\n if 'job' not in context.chat_data:\n update.message.reply_text('You have no active timer')\n return\n\n job = context.chat_data['job']\n job.schedule_removal()\n del context.chat_data['job']\n\n update.message.reply_text('Timer successfully unset!')\n\n\ndef main():\n \"\"\"Run bot.\"\"\"\n # Create the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(\"TOKEN\", use_context=True)\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", start))\n dp.add_handler(\n CommandHandler(\"set\", set_timer, pass_args=True, pass_job_queue=True, pass_chat_data=True)\n )\n dp.add_handler(CommandHandler(\"unset\", unset, pass_chat_data=True))\n\n # Start the Bot\n updater.start_polling()\n\n # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or\n # SIGABRT. This should be used most of the time, since start_polling() is\n # non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n", "path": "examples/timerbot.py"}]}
1,734
668
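Note on gh_patches_debug_37653 (timerbot): the load-bearing piece of the diff is keying jobs by `str(chat_id)` and looking them up with `get_jobs_by_name`, replacing the `chat_data['job']` bookkeeping that broke once APScheduler began dropping jobs after they run. The helper exactly as the diff introduces it, plus the matching scheduling call:

```python
def remove_job_if_exists(name, context):
    """Remove job with given name. Returns whether job was removed."""
    current_jobs = context.job_queue.get_jobs_by_name(name)
    if not current_jobs:
        return False
    for job in current_jobs:
        job.schedule_removal()
    return True

# scheduling side, inside the /set handler — one named job per chat:
# context.job_queue.run_once(alarm, due, context=chat_id, name=str(chat_id))
```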
gh_patches_debug_32102
rasdani/github-patches
git_diff
getsentry__sentry-2226
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 500 error when rename team slug Error raised if same slug already exists Traceback: ``` File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/core/handlers/base.py", line 112, in get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/views/generic/base.py", line 69, in view return self.dispatch(request, *args, **kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py", line 29, in _wrapper return bound_func(*args, **kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py", line 99, in _wrapped_view response = view_func(request, *args, **kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py", line 25, in bound_func return func(self, *args2, **kwargs2) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/web/frontend/base.py", line 144, in dispatch return self.handle(request, *args, **kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/web/frontend/team_settings.py", line 37, in handle team = form.save() File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/forms/models.py", line 446, in save construct=False) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/forms/models.py", line 99, in save_instance instance.save() File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/models/team.py", line 141, in save super(Team, self).save(*args, **kwargs) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py", line 545, in save force_update=force_update, update_fields=update_fields) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py", line 573, in save_base updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py", line 635, in _save_table forced_update) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py", line 679, in _do_update return filtered._update(values) > 0 File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/query.py", line 510, in _update return query.get_compiler(self.db).execute_sql(None) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/sql/compiler.py", line 980, in execute_sql cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/sql/compiler.py", line 786, in execute_sql cursor.execute(sql, params) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/backends/util.py", line 53, in execute return self.cursor.execute(sql, params) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/utils.py", line 99, in __exit__ six.reraise(dj_exc_type, dj_exc_value, traceback) File "/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/backends/util.py", line 53, in execute return self.cursor.execute(sql, params) IntegrityError: duplicate key value violates unique constraint "sentry_team_organization_id_1e0ece47434a2ed_uniq" DETAIL: Key (organization_id, slug)=(1, adwile) already 
exists. ``` </issue> <code> [start of src/sentry/web/frontend/team_settings.py] 1 from __future__ import absolute_import 2 3 from django import forms 4 from django.contrib import messages 5 from django.core.urlresolvers import reverse 6 from django.http import HttpResponseRedirect 7 from django.utils.translation import ugettext_lazy as _ 8 9 from sentry.models import AuditLogEntry, AuditLogEntryEvent, Team 10 from sentry.web.frontend.base import TeamView 11 12 13 class EditTeamForm(forms.ModelForm): 14 class Meta: 15 fields = ('name', 'slug',) 16 model = Team 17 18 19 class TeamSettingsView(TeamView): 20 required_scope = 'team:write' 21 22 def get_form(self, request, team): 23 return EditTeamForm(request.POST or None, instance=team) 24 25 def handle(self, request, organization, team): 26 form = self.get_form(request, team) 27 if form.is_valid(): 28 team = form.save() 29 30 AuditLogEntry.objects.create( 31 organization=organization, 32 actor=request.user, 33 ip_address=request.META['REMOTE_ADDR'], 34 target_object=team.id, 35 event=AuditLogEntryEvent.TEAM_EDIT, 36 data=team.get_audit_log_data(), 37 ) 38 39 messages.add_message(request, messages.SUCCESS, 40 _('Changes to your team were saved.')) 41 42 return HttpResponseRedirect(reverse('sentry-manage-team', args=[organization.slug, team.slug])) 43 44 if request.user.is_active_superuser(): 45 can_remove_team = True 46 else: 47 can_remove_team = request.access.has_team_scope(team, 'team:delete') 48 49 context = { 50 'form': form, 51 'can_remove_team': can_remove_team, 52 } 53 54 return self.respond('sentry/teams/manage.html', context) 55 [end of src/sentry/web/frontend/team_settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/web/frontend/team_settings.py b/src/sentry/web/frontend/team_settings.py --- a/src/sentry/web/frontend/team_settings.py +++ b/src/sentry/web/frontend/team_settings.py @@ -3,6 +3,7 @@ from django import forms from django.contrib import messages from django.core.urlresolvers import reverse +from django.db import IntegrityError from django.http import HttpResponseRedirect from django.utils.translation import ugettext_lazy as _ @@ -23,21 +24,27 @@ return EditTeamForm(request.POST or None, instance=team) def handle(self, request, organization, team): + old_slug = team.slug form = self.get_form(request, team) if form.is_valid(): - team = form.save() - - AuditLogEntry.objects.create( - organization=organization, - actor=request.user, - ip_address=request.META['REMOTE_ADDR'], - target_object=team.id, - event=AuditLogEntryEvent.TEAM_EDIT, - data=team.get_audit_log_data(), - ) - - messages.add_message(request, messages.SUCCESS, - _('Changes to your team were saved.')) + try: + team = form.save() + except IntegrityError: + team.slug = old_slug + messages.add_message(request, messages.ERROR, + _('Changes to your team failed. Slug already exists.')) + else: + AuditLogEntry.objects.create( + organization=organization, + actor=request.user, + ip_address=request.META['REMOTE_ADDR'], + target_object=team.id, + event=AuditLogEntryEvent.TEAM_EDIT, + data=team.get_audit_log_data(), + ) + + messages.add_message(request, messages.SUCCESS, + _('Changes to your team were saved.')) return HttpResponseRedirect(reverse('sentry-manage-team', args=[organization.slug, team.slug]))
{"golden_diff": "diff --git a/src/sentry/web/frontend/team_settings.py b/src/sentry/web/frontend/team_settings.py\n--- a/src/sentry/web/frontend/team_settings.py\n+++ b/src/sentry/web/frontend/team_settings.py\n@@ -3,6 +3,7 @@\n from django import forms\n from django.contrib import messages\n from django.core.urlresolvers import reverse\n+from django.db import IntegrityError\n from django.http import HttpResponseRedirect\n from django.utils.translation import ugettext_lazy as _\n \n@@ -23,21 +24,27 @@\n return EditTeamForm(request.POST or None, instance=team)\n \n def handle(self, request, organization, team):\n+ old_slug = team.slug\n form = self.get_form(request, team)\n if form.is_valid():\n- team = form.save()\n-\n- AuditLogEntry.objects.create(\n- organization=organization,\n- actor=request.user,\n- ip_address=request.META['REMOTE_ADDR'],\n- target_object=team.id,\n- event=AuditLogEntryEvent.TEAM_EDIT,\n- data=team.get_audit_log_data(),\n- )\n-\n- messages.add_message(request, messages.SUCCESS,\n- _('Changes to your team were saved.'))\n+ try:\n+ team = form.save()\n+ except IntegrityError:\n+ team.slug = old_slug\n+ messages.add_message(request, messages.ERROR,\n+ _('Changes to your team failed. Slug already exists.'))\n+ else:\n+ AuditLogEntry.objects.create(\n+ organization=organization,\n+ actor=request.user,\n+ ip_address=request.META['REMOTE_ADDR'],\n+ target_object=team.id,\n+ event=AuditLogEntryEvent.TEAM_EDIT,\n+ data=team.get_audit_log_data(),\n+ )\n+\n+ messages.add_message(request, messages.SUCCESS,\n+ _('Changes to your team were saved.'))\n \n return HttpResponseRedirect(reverse('sentry-manage-team', args=[organization.slug, team.slug]))\n", "issue": "500 error when rename team slug\nError raised if same slug already exists\n\nTraceback:\n\n```\nFile \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/core/handlers/base.py\", line 112, in get_response\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/views/generic/base.py\", line 69, in view\n return self.dispatch(request, *args, **kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py\", line 29, in _wrapper\n return bound_func(*args, **kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py\", line 99, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/utils/decorators.py\", line 25, in bound_func\n return func(self, *args2, **kwargs2)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/web/frontend/base.py\", line 144, in dispatch\n return self.handle(request, *args, **kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/web/frontend/team_settings.py\", line 37, in handle\n team = form.save()\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/forms/models.py\", line 446, in save\n construct=False)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/forms/models.py\", line 99, in save_instance\n instance.save()\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/sentry/models/team.py\", line 141, in save\n super(Team, self).save(*args, **kwargs)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py\", line 545, in save\n force_update=force_update, 
update_fields=update_fields)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py\", line 573, in save_base\n updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py\", line 635, in _save_table\n forced_update)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/base.py\", line 679, in _do_update\n return filtered._update(values) > 0\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/query.py\", line 510, in _update\n return query.get_compiler(self.db).execute_sql(None)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/sql/compiler.py\", line 980, in execute_sql\n cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/models/sql/compiler.py\", line 786, in execute_sql\n cursor.execute(sql, params)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/backends/util.py\", line 53, in execute\n return self.cursor.execute(sql, params)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/utils.py\", line 99, in __exit__\n six.reraise(dj_exc_type, dj_exc_value, traceback)\n File \"/home/www/pyenv/versions/sentry/lib/python2.7/site-packages/django/db/backends/util.py\", line 53, in execute\n return self.cursor.execute(sql, params)\n IntegrityError: duplicate key value violates unique constraint \"sentry_team_organization_id_1e0ece47434a2ed_uniq\"\n DETAIL: Key (organization_id, slug)=(1, adwile) already exists.\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.models import AuditLogEntry, AuditLogEntryEvent, Team\nfrom sentry.web.frontend.base import TeamView\n\n\nclass EditTeamForm(forms.ModelForm):\n class Meta:\n fields = ('name', 'slug',)\n model = Team\n\n\nclass TeamSettingsView(TeamView):\n required_scope = 'team:write'\n\n def get_form(self, request, team):\n return EditTeamForm(request.POST or None, instance=team)\n\n def handle(self, request, organization, team):\n form = self.get_form(request, team)\n if form.is_valid():\n team = form.save()\n\n AuditLogEntry.objects.create(\n organization=organization,\n actor=request.user,\n ip_address=request.META['REMOTE_ADDR'],\n target_object=team.id,\n event=AuditLogEntryEvent.TEAM_EDIT,\n data=team.get_audit_log_data(),\n )\n\n messages.add_message(request, messages.SUCCESS,\n _('Changes to your team were saved.'))\n\n return HttpResponseRedirect(reverse('sentry-manage-team', args=[organization.slug, team.slug]))\n\n if request.user.is_active_superuser():\n can_remove_team = True\n else:\n can_remove_team = request.access.has_team_scope(team, 'team:delete')\n\n context = {\n 'form': form,\n 'can_remove_team': can_remove_team,\n }\n\n return self.respond('sentry/teams/manage.html', context)\n", "path": "src/sentry/web/frontend/team_settings.py"}]}
num_tokens_prompt: 1,981
num_tokens_diff: 419
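The golden diff for the record above follows a save-then-roll-back pattern: remember the old slug, attempt the save, and restore the in-memory value when the database's UNIQUE constraint rejects the duplicate. Below is a minimal, framework-free sketch of that pattern; `SlugTaken` and the dict-based `team` are stand-ins for Django's `IntegrityError` and the model instance, and `taken_slugs` simulates the `(organization_id, slug)` uniqueness index.

```python
class SlugTaken(Exception):
    """Stand-in for django.db.IntegrityError in this sketch."""


def rename_team(team, new_slug, taken_slugs):
    old_slug = team["slug"]          # remember the value we may restore
    team["slug"] = new_slug
    try:
        if new_slug in taken_slugs:  # simulates the UNIQUE(org, slug) index
            raise SlugTaken(new_slug)
    except SlugTaken:
        team["slug"] = old_slug      # roll the in-memory object back
        return "Changes to your team failed. Slug already exists."
    return "Changes to your team were saved."


team = {"slug": "old-team"}
print(rename_team(team, "adwile", taken_slugs={"adwile"}))  # failure path
print(team["slug"])  # 'old-team' again, nothing left half-renamed
```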
problem_id: gh_patches_debug_29925
source: rasdani/github-patches
task_type: git_diff
in_source_id: kornia__kornia-2009
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update `make test-all` ```Makefile # TODO: Add cuda-float16 when #649 is solved test-all: FORCE pytest -v --device all --dtype float32,float64 --cov=kornia test/ --flake8 --mypy ``` Maybe instead of just adding `float16`, we can use `--dtype all`? (https://github.com/kornia/kornia/blob/5d1d8cc8c4fb3e398b429edd2cc25ef40d6299ce/conftest.py#L77) </issue> <code> [start of kornia/enhance/core.py] 1 from kornia.core import Module, Tensor 2 from kornia.testing import KORNIA_CHECK, KORNIA_CHECK_IS_TENSOR 3 4 5 def add_weighted(src1: Tensor, alpha, src2: Tensor, beta, gamma) -> Tensor: 6 r"""Calculate the weighted sum of two Tensors. 7 8 .. image:: _static/img/add_weighted.png 9 10 The function calculates the weighted sum of two Tensors as follows: 11 12 .. math:: 13 out = src1 * alpha + src2 * beta + gamma 14 15 Args: 16 src1: Tensor with an arbitrary shape, equal to shape of src2. 17 alpha: weight of the src1 elements as Union[float, Tensor]. 18 src2: Tensor with an arbitrary shape, equal to shape of src1. 19 beta: weight of the src2 elements as Union[float, Tensor]. 20 gamma: scalar added to each sum as Union[float, Tensor]. 21 22 Returns: 23 Weighted Tensor with shape equal to src1 and src2 shapes. 24 25 Example: 26 >>> input1 = torch.rand(1, 1, 5, 5) 27 >>> input2 = torch.rand(1, 1, 5, 5) 28 >>> output = add_weighted(input1, 0.5, input2, 0.5, 1.0) 29 >>> output.shape 30 torch.Size([1, 1, 5, 5]) 31 32 Notes: 33 Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes. 34 """ 35 KORNIA_CHECK_IS_TENSOR(src1) 36 KORNIA_CHECK_IS_TENSOR(src2) 37 KORNIA_CHECK(src1.shape == src2.shape, f"src1 and src2 have different shapes. Got {src1.shape} and {src2.shape}") 38 if isinstance(alpha, Tensor): 39 KORNIA_CHECK(src1.shape == alpha.shape, "alpha has a different shape than src.") 40 if isinstance(beta, Tensor): 41 KORNIA_CHECK(src1.shape == beta.shape, "beta has a different shape than src.") 42 if isinstance(gamma, Tensor): 43 KORNIA_CHECK(src1.shape == gamma.shape, "gamma has a different shape than src.") 44 45 return src1 * alpha + src2 * beta + gamma 46 47 48 class AddWeighted(Module): 49 r"""Calculate the weighted sum of two Tensors. 50 51 The function calculates the weighted sum of two Tensors as follows: 52 53 .. math:: 54 out = src1 * alpha + src2 * beta + gamma 55 56 Args: 57 alpha: weight of the src1 elements as Union[float, Tensor]. 58 beta: weight of the src2 elements as Union[float, Tensor]. 59 gamma: scalar added to each sum as Union[float, Tensor]. 60 61 Shape: 62 - Input1: Tensor with an arbitrary shape, equal to shape of Input2. 63 - Input2: Tensor with an arbitrary shape, equal to shape of Input1. 64 - Output: Weighted tensor with shape equal to src1 and src2 shapes. 65 66 Example: 67 >>> input1 = torch.rand(1, 1, 5, 5) 68 >>> input2 = torch.rand(1, 1, 5, 5) 69 >>> output = AddWeighted(0.5, 0.5, 1.0)(input1, input2) 70 >>> output.shape 71 torch.Size([1, 1, 5, 5]) 72 73 Notes: 74 Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes. 
75 """ 76 77 def __init__(self, alpha, beta, gamma) -> None: 78 super().__init__() 79 self.alpha = alpha 80 self.beta = beta 81 self.gamma = gamma 82 83 def forward(self, src1: Tensor, src2: Tensor) -> Tensor: 84 return add_weighted(src1, self.alpha, src2, self.beta, self.gamma) 85 [end of kornia/enhance/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kornia/enhance/core.py b/kornia/enhance/core.py --- a/kornia/enhance/core.py +++ b/kornia/enhance/core.py @@ -1,8 +1,12 @@ -from kornia.core import Module, Tensor +from typing import Union + +from kornia.core import Module, Tensor, tensor from kornia.testing import KORNIA_CHECK, KORNIA_CHECK_IS_TENSOR -def add_weighted(src1: Tensor, alpha, src2: Tensor, beta, gamma) -> Tensor: +def add_weighted( + src1: Tensor, alpha: Union[float, Tensor], src2: Tensor, beta: Union[float, Tensor], gamma: Union[float, Tensor] +) -> Tensor: r"""Calculate the weighted sum of two Tensors. .. image:: _static/img/add_weighted.png @@ -35,12 +39,21 @@ KORNIA_CHECK_IS_TENSOR(src1) KORNIA_CHECK_IS_TENSOR(src2) KORNIA_CHECK(src1.shape == src2.shape, f"src1 and src2 have different shapes. Got {src1.shape} and {src2.shape}") + if isinstance(alpha, Tensor): KORNIA_CHECK(src1.shape == alpha.shape, "alpha has a different shape than src.") + else: + alpha = tensor(alpha, dtype=src1.dtype, device=src1.device) + if isinstance(beta, Tensor): KORNIA_CHECK(src1.shape == beta.shape, "beta has a different shape than src.") + else: + beta = tensor(beta, dtype=src1.dtype, device=src1.device) + if isinstance(gamma, Tensor): KORNIA_CHECK(src1.shape == gamma.shape, "gamma has a different shape than src.") + else: + gamma = tensor(gamma, dtype=src1.dtype, device=src1.device) return src1 * alpha + src2 * beta + gamma
{"golden_diff": "diff --git a/kornia/enhance/core.py b/kornia/enhance/core.py\n--- a/kornia/enhance/core.py\n+++ b/kornia/enhance/core.py\n@@ -1,8 +1,12 @@\n-from kornia.core import Module, Tensor\n+from typing import Union\n+\n+from kornia.core import Module, Tensor, tensor\n from kornia.testing import KORNIA_CHECK, KORNIA_CHECK_IS_TENSOR\n \n \n-def add_weighted(src1: Tensor, alpha, src2: Tensor, beta, gamma) -> Tensor:\n+def add_weighted(\n+ src1: Tensor, alpha: Union[float, Tensor], src2: Tensor, beta: Union[float, Tensor], gamma: Union[float, Tensor]\n+) -> Tensor:\n r\"\"\"Calculate the weighted sum of two Tensors.\n \n .. image:: _static/img/add_weighted.png\n@@ -35,12 +39,21 @@\n KORNIA_CHECK_IS_TENSOR(src1)\n KORNIA_CHECK_IS_TENSOR(src2)\n KORNIA_CHECK(src1.shape == src2.shape, f\"src1 and src2 have different shapes. Got {src1.shape} and {src2.shape}\")\n+\n if isinstance(alpha, Tensor):\n KORNIA_CHECK(src1.shape == alpha.shape, \"alpha has a different shape than src.\")\n+ else:\n+ alpha = tensor(alpha, dtype=src1.dtype, device=src1.device)\n+\n if isinstance(beta, Tensor):\n KORNIA_CHECK(src1.shape == beta.shape, \"beta has a different shape than src.\")\n+ else:\n+ beta = tensor(beta, dtype=src1.dtype, device=src1.device)\n+\n if isinstance(gamma, Tensor):\n KORNIA_CHECK(src1.shape == gamma.shape, \"gamma has a different shape than src.\")\n+ else:\n+ gamma = tensor(gamma, dtype=src1.dtype, device=src1.device)\n \n return src1 * alpha + src2 * beta + gamma\n", "issue": "Update `make test-all`\n```Makefile\r\n# TODO: Add cuda-float16 when #649 is solved\r\ntest-all: FORCE\r\n\tpytest -v --device all --dtype float32,float64 --cov=kornia test/ --flake8 --mypy\r\n```\r\nMaybe instead of just adding `float16`, we can use `--dtype all`? (https://github.com/kornia/kornia/blob/5d1d8cc8c4fb3e398b429edd2cc25ef40d6299ce/conftest.py#L77)\n", "before_files": [{"content": "from kornia.core import Module, Tensor\nfrom kornia.testing import KORNIA_CHECK, KORNIA_CHECK_IS_TENSOR\n\n\ndef add_weighted(src1: Tensor, alpha, src2: Tensor, beta, gamma) -> Tensor:\n r\"\"\"Calculate the weighted sum of two Tensors.\n\n .. image:: _static/img/add_weighted.png\n\n The function calculates the weighted sum of two Tensors as follows:\n\n .. math::\n out = src1 * alpha + src2 * beta + gamma\n\n Args:\n src1: Tensor with an arbitrary shape, equal to shape of src2.\n alpha: weight of the src1 elements as Union[float, Tensor].\n src2: Tensor with an arbitrary shape, equal to shape of src1.\n beta: weight of the src2 elements as Union[float, Tensor].\n gamma: scalar added to each sum as Union[float, Tensor].\n\n Returns:\n Weighted Tensor with shape equal to src1 and src2 shapes.\n\n Example:\n >>> input1 = torch.rand(1, 1, 5, 5)\n >>> input2 = torch.rand(1, 1, 5, 5)\n >>> output = add_weighted(input1, 0.5, input2, 0.5, 1.0)\n >>> output.shape\n torch.Size([1, 1, 5, 5])\n\n Notes:\n Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes.\n \"\"\"\n KORNIA_CHECK_IS_TENSOR(src1)\n KORNIA_CHECK_IS_TENSOR(src2)\n KORNIA_CHECK(src1.shape == src2.shape, f\"src1 and src2 have different shapes. 
Got {src1.shape} and {src2.shape}\")\n if isinstance(alpha, Tensor):\n KORNIA_CHECK(src1.shape == alpha.shape, \"alpha has a different shape than src.\")\n if isinstance(beta, Tensor):\n KORNIA_CHECK(src1.shape == beta.shape, \"beta has a different shape than src.\")\n if isinstance(gamma, Tensor):\n KORNIA_CHECK(src1.shape == gamma.shape, \"gamma has a different shape than src.\")\n\n return src1 * alpha + src2 * beta + gamma\n\n\nclass AddWeighted(Module):\n r\"\"\"Calculate the weighted sum of two Tensors.\n\n The function calculates the weighted sum of two Tensors as follows:\n\n .. math::\n out = src1 * alpha + src2 * beta + gamma\n\n Args:\n alpha: weight of the src1 elements as Union[float, Tensor].\n beta: weight of the src2 elements as Union[float, Tensor].\n gamma: scalar added to each sum as Union[float, Tensor].\n\n Shape:\n - Input1: Tensor with an arbitrary shape, equal to shape of Input2.\n - Input2: Tensor with an arbitrary shape, equal to shape of Input1.\n - Output: Weighted tensor with shape equal to src1 and src2 shapes.\n\n Example:\n >>> input1 = torch.rand(1, 1, 5, 5)\n >>> input2 = torch.rand(1, 1, 5, 5)\n >>> output = AddWeighted(0.5, 0.5, 1.0)(input1, input2)\n >>> output.shape\n torch.Size([1, 1, 5, 5])\n\n Notes:\n Tensor alpha/beta/gamma have to be with shape broadcastable to src1 and src2 shapes.\n \"\"\"\n\n def __init__(self, alpha, beta, gamma) -> None:\n super().__init__()\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n def forward(self, src1: Tensor, src2: Tensor) -> Tensor:\n return add_weighted(src1, self.alpha, src2, self.beta, self.gamma)\n", "path": "kornia/enhance/core.py"}]}
num_tokens_prompt: 1,690
num_tokens_diff: 442
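The golden diff above promotes plain-float weights to tensors that inherit the input's dtype and device, which keeps float16 inputs in float16 instead of letting PyTorch's type promotion upcast them — the property a `--dtype all` test run exercises. A trimmed re-implementation of the same idea with PyTorch only, dropping kornia's `KORNIA_CHECK` shape validation:

```python
import torch


def add_weighted(src1, alpha, src2, beta, gamma):
    def as_tensor(value):
        if isinstance(value, torch.Tensor):
            return value
        # Match the input's dtype/device so float16 inputs stay float16.
        return torch.tensor(value, dtype=src1.dtype, device=src1.device)

    alpha, beta, gamma = (as_tensor(v) for v in (alpha, beta, gamma))
    return src1 * alpha + src2 * beta + gamma


x = torch.rand(1, 1, 5, 5, dtype=torch.float16)
y = torch.rand(1, 1, 5, 5, dtype=torch.float16)
out = add_weighted(x, 0.5, y, 0.5, 1.0)
print(out.dtype)  # torch.float16, not silently promoted
```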
problem_id: gh_patches_debug_18224
source: rasdani/github-patches
task_type: git_diff
in_source_id: internetarchive__openlibrary-6910
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Data Dumps not auto-generating Despite #5263 being resolved, it looks like the data dumps weren't uploaded on July 1st :/ ### Relevant URL? * https://github.com/internetarchive/openlibrary/wiki/Generating-Data-Dumps * https://archive.org/details/ol_exports?sort=-publicdate Related issues and pull requests: * #3989 * #4621 * #4671 * #4723 * #5546 * #5673 * #5719 * #5892 - Worth reading! * #6158 * #6163 Related files: * [`docker-compose.production.yml`](../blob/master/docker-compose.production.yml#L90) defines `cron-jobs` Docker container. * [`docker/ol-cron-start.sh`](../blob/master/docker/ol-cron-start.sh) sets up the cron tasks. * [olsystem: `/etc/cron.d/openlibrary.ol_home0`](https://github.com/internetarchive/olsystem/blob/master/etc/cron.d/openlibrary.ol_home0#L11) defines the actual job * modify and then to reactivate do: `crontab /etc/cron.d/openlibrary.ol_home0` Also: https://cron.help * [x] internetarchive/olsystem#140 * [`scripts/oldump.sh`](../blob/master/scripts/oldump.sh) is the script that gets run. * [x] #5860 ### Proposal & Constraints - Run manually for now ### Related files <!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. --> ### Stakeholders @mekarpeles @jimman2003 </issue> <code> [start of scripts/oldump.py] 1 #!/usr/bin/env python 2 3 import logging 4 import os 5 import sys 6 from datetime import datetime 7 8 import _init_path # noqa: F401 Imported for its side effect of setting PYTHONPATH 9 10 logger = logging.getLogger(__file__) 11 logger.setLevel(logging.DEBUG) 12 13 14 def log(*args) -> None: 15 args_str = " ".join(str(a) for a in args) 16 msg = f"{datetime.now():%Y-%m-%d %H:%M:%S} [openlibrary.dump] {args_str}" 17 logger.info(msg) 18 print(msg, file=sys.stderr) 19 20 21 if __name__ == "__main__": 22 from infogami import config 23 from openlibrary.config import load_config 24 from openlibrary.data import dump 25 from openlibrary.utils.sentry import Sentry 26 27 log("{} on Python {}.{}.{}".format(sys.argv, *sys.version_info)) # Python 3.10.4 28 29 ol_config = os.getenv("OL_CONFIG") 30 if ol_config: 31 logger.info(f"loading config from {ol_config}") 32 load_config(ol_config) 33 sentry = Sentry(getattr(config, "sentry_cron_jobs", {})) 34 if sentry.enabled: 35 sentry.init() 36 log(f"sentry.enabled = {bool(ol_config and sentry.enabled)}") 37 38 dump.main(sys.argv[1], sys.argv[2:]) 39 [end of scripts/oldump.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/oldump.py b/scripts/oldump.py --- a/scripts/oldump.py +++ b/scripts/oldump.py @@ -19,6 +19,7 @@ if __name__ == "__main__": + from contextlib import redirect_stdout from infogami import config from openlibrary.config import load_config from openlibrary.data import dump @@ -29,7 +30,11 @@ ol_config = os.getenv("OL_CONFIG") if ol_config: logger.info(f"loading config from {ol_config}") - load_config(ol_config) + # Squelch output from infobase (needed for sentry setup) + # So it doesn't end up in our data dumps body + with open(os.devnull, 'w') as devnull: + with redirect_stdout(devnull): + load_config(ol_config) sentry = Sentry(getattr(config, "sentry_cron_jobs", {})) if sentry.enabled: sentry.init()
{"golden_diff": "diff --git a/scripts/oldump.py b/scripts/oldump.py\n--- a/scripts/oldump.py\n+++ b/scripts/oldump.py\n@@ -19,6 +19,7 @@\n \n \n if __name__ == \"__main__\":\n+ from contextlib import redirect_stdout\n from infogami import config\n from openlibrary.config import load_config\n from openlibrary.data import dump\n@@ -29,7 +30,11 @@\n ol_config = os.getenv(\"OL_CONFIG\")\n if ol_config:\n logger.info(f\"loading config from {ol_config}\")\n- load_config(ol_config)\n+ # Squelch output from infobase (needed for sentry setup)\n+ # So it doesn't end up in our data dumps body\n+ with open(os.devnull, 'w') as devnull:\n+ with redirect_stdout(devnull):\n+ load_config(ol_config)\n sentry = Sentry(getattr(config, \"sentry_cron_jobs\", {}))\n if sentry.enabled:\n sentry.init()\n", "issue": "Data Dumps not auto-generating\nDespite #5263 being resolved, it looks like the data dumps weren't uploaded on July 1st :/ \r\n\r\n### Relevant URL?\r\n* https://github.com/internetarchive/openlibrary/wiki/Generating-Data-Dumps\r\n* https://archive.org/details/ol_exports?sort=-publicdate\r\n\r\nRelated issues and pull requests:\r\n* #3989 \r\n* #4621\r\n* #4671\r\n* #4723\r\n* #5546\r\n* #5673 \r\n* #5719 \r\n* #5892 - Worth reading!\r\n* #6158\r\n* #6163\r\n\r\nRelated files:\r\n* [`docker-compose.production.yml`](../blob/master/docker-compose.production.yml#L90) defines `cron-jobs` Docker container.\r\n* [`docker/ol-cron-start.sh`](../blob/master/docker/ol-cron-start.sh) sets up the cron tasks.\r\n* [olsystem: `/etc/cron.d/openlibrary.ol_home0`](https://github.com/internetarchive/olsystem/blob/master/etc/cron.d/openlibrary.ol_home0#L11) defines the actual job\r\n * modify and then to reactivate do: `crontab /etc/cron.d/openlibrary.ol_home0` Also: https://cron.help\r\n * [x] internetarchive/olsystem#140\r\n* [`scripts/oldump.sh`](../blob/master/scripts/oldump.sh) is the script that gets run.\r\n * [x] #5860\r\n\r\n### Proposal & Constraints\r\n- Run manually for now\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n@mekarpeles @jimman2003 \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport logging\nimport os\nimport sys\nfrom datetime import datetime\n\nimport _init_path # noqa: F401 Imported for its side effect of setting PYTHONPATH\n\nlogger = logging.getLogger(__file__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef log(*args) -> None:\n args_str = \" \".join(str(a) for a in args)\n msg = f\"{datetime.now():%Y-%m-%d %H:%M:%S} [openlibrary.dump] {args_str}\"\n logger.info(msg)\n print(msg, file=sys.stderr)\n\n\nif __name__ == \"__main__\":\n from infogami import config\n from openlibrary.config import load_config\n from openlibrary.data import dump\n from openlibrary.utils.sentry import Sentry\n\n log(\"{} on Python {}.{}.{}\".format(sys.argv, *sys.version_info)) # Python 3.10.4\n\n ol_config = os.getenv(\"OL_CONFIG\")\n if ol_config:\n logger.info(f\"loading config from {ol_config}\")\n load_config(ol_config)\n sentry = Sentry(getattr(config, \"sentry_cron_jobs\", {}))\n if sentry.enabled:\n sentry.init()\n log(f\"sentry.enabled = {bool(ol_config and sentry.enabled)}\")\n\n dump.main(sys.argv[1], sys.argv[2:])\n", "path": "scripts/oldump.py"}]}
num_tokens_prompt: 1,313
num_tokens_diff: 229
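The fix in the record above is a stdout-squelching wrapper: as the diff's comment notes, anything the config loader prints would otherwise end up in the data-dump body. A standalone sketch of the same pattern; `noisy_loader` is a made-up stand-in for `load_config`.

```python
import os
from contextlib import redirect_stdout


def load_quietly(loader, path):
    """Run a loader while discarding whatever it prints to stdout."""
    with open(os.devnull, "w") as devnull, redirect_stdout(devnull):
        return loader(path)


def noisy_loader(path):  # hypothetical stand-in for openlibrary's load_config
    print("infobase banner that would corrupt the dump body")
    return {"config": path}


print(load_quietly(noisy_loader, "openlibrary.yml"))  # only the dict prints
```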
problem_id: gh_patches_debug_9976
source: rasdani/github-patches
task_type: git_diff
in_source_id: mkdocs__mkdocs-2421
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> file-system race condition while testing with mkdocs serve when mkdocs serve is shutting down, it cleans up after itself removing the temporary directory created earlier. the condition whether the directory needs to be removed is unchecked and prone to a file system race condition (shared resource). given the directory is already removed on the file-system while shutting down, mkdocs serve throws an exception and exits in failure. this error can be safely defined out of existence by removing the temporary directory only if it exists, will suggest a PR for this report. /E: created #2421 </issue> <code> [start of mkdocs/commands/serve.py] 1 import logging 2 import shutil 3 import tempfile 4 5 from os.path import isfile, join 6 from mkdocs.commands.build import build 7 from mkdocs.config import load_config 8 from mkdocs.exceptions import Abort 9 from mkdocs.livereload import LiveReloadServer 10 11 log = logging.getLogger(__name__) 12 13 14 def serve(config_file=None, dev_addr=None, strict=None, theme=None, 15 theme_dir=None, livereload='livereload', watch_theme=False, **kwargs): 16 """ 17 Start the MkDocs development server 18 19 By default it will serve the documentation on http://localhost:8000/ and 20 it will rebuild the documentation and refresh the page automatically 21 whenever a file is edited. 22 """ 23 24 # Create a temporary build directory, and set some options to serve it 25 # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode 26 # string is returned. And it makes MkDocs temp dirs easier to identify. 27 site_dir = tempfile.mkdtemp(prefix='mkdocs_') 28 29 def builder(): 30 log.info("Building documentation...") 31 config = load_config( 32 config_file=config_file, 33 dev_addr=dev_addr, 34 strict=strict, 35 theme=theme, 36 theme_dir=theme_dir, 37 site_dir=site_dir, 38 **kwargs 39 ) 40 # Override a few config settings after validation 41 config['site_url'] = 'http://{}/'.format(config['dev_addr']) 42 43 live_server = livereload in ['dirty', 'livereload'] 44 dirty = livereload == 'dirty' 45 build(config, live_server=live_server, dirty=dirty) 46 return config 47 48 try: 49 # Perform the initial build 50 config = builder() 51 52 host, port = config['dev_addr'] 53 54 server = LiveReloadServer(builder=builder, host=host, port=port, root=site_dir) 55 56 def error_handler(code): 57 if code in (404, 500): 58 error_page = join(site_dir, f'{code}.html') 59 if isfile(error_page): 60 with open(error_page, 'rb') as f: 61 return f.read() 62 63 server.error_handler = error_handler 64 65 if livereload in ['livereload', 'dirty']: 66 # Watch the documentation files, the config file and the theme files. 67 server.watch(config['docs_dir']) 68 server.watch(config['config_file_path']) 69 70 if watch_theme: 71 for d in config['theme'].dirs: 72 server.watch(d) 73 74 # Run `serve` plugin events. 75 server = config['plugins'].run_event('serve', server, config=config, builder=builder) 76 77 try: 78 server.serve() 79 except KeyboardInterrupt: 80 log.info("Shutting down...") 81 finally: 82 server.shutdown() 83 except OSError as e: # pragma: no cover 84 # Avoid ugly, unhelpful traceback 85 raise Abort(str(e)) 86 finally: 87 shutil.rmtree(site_dir) 88 [end of mkdocs/commands/serve.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py --- a/mkdocs/commands/serve.py +++ b/mkdocs/commands/serve.py @@ -2,7 +2,7 @@ import shutil import tempfile -from os.path import isfile, join +from os.path import isdir, isfile, join from mkdocs.commands.build import build from mkdocs.config import load_config from mkdocs.exceptions import Abort @@ -84,4 +84,5 @@ # Avoid ugly, unhelpful traceback raise Abort(str(e)) finally: - shutil.rmtree(site_dir) + if isdir(site_dir): + shutil.rmtree(site_dir)
{"golden_diff": "diff --git a/mkdocs/commands/serve.py b/mkdocs/commands/serve.py\n--- a/mkdocs/commands/serve.py\n+++ b/mkdocs/commands/serve.py\n@@ -2,7 +2,7 @@\n import shutil\n import tempfile\n \n-from os.path import isfile, join\n+from os.path import isdir, isfile, join\n from mkdocs.commands.build import build\n from mkdocs.config import load_config\n from mkdocs.exceptions import Abort\n@@ -84,4 +84,5 @@\n # Avoid ugly, unhelpful traceback\n raise Abort(str(e))\n finally:\n- shutil.rmtree(site_dir)\n+ if isdir(site_dir):\n+ shutil.rmtree(site_dir)\n", "issue": "file-system race condition while testing with mkdocs serve\nwhen mkdocs serve is shutting down, it cleans up after itself removing the temporary directory created earlier.\r\n\r\nthe condition whether the directory needs to be removed is unchecked and prone to a file system race condition (shared resource).\r\n\r\ngiven the directory is already removed on the file-system while shutting down, mkdocs serve throws an exception and exits in failure.\r\n\r\nthis error can be safely defined out of existence by removing the temporary directory only if it exists, will suggest a PR for this report. /E: created #2421\n", "before_files": [{"content": "import logging\nimport shutil\nimport tempfile\n\nfrom os.path import isfile, join\nfrom mkdocs.commands.build import build\nfrom mkdocs.config import load_config\nfrom mkdocs.exceptions import Abort\nfrom mkdocs.livereload import LiveReloadServer\n\nlog = logging.getLogger(__name__)\n\n\ndef serve(config_file=None, dev_addr=None, strict=None, theme=None,\n theme_dir=None, livereload='livereload', watch_theme=False, **kwargs):\n \"\"\"\n Start the MkDocs development server\n\n By default it will serve the documentation on http://localhost:8000/ and\n it will rebuild the documentation and refresh the page automatically\n whenever a file is edited.\n \"\"\"\n\n # Create a temporary build directory, and set some options to serve it\n # PY2 returns a byte string by default. The Unicode prefix ensures a Unicode\n # string is returned. 
And it makes MkDocs temp dirs easier to identify.\n site_dir = tempfile.mkdtemp(prefix='mkdocs_')\n\n def builder():\n log.info(\"Building documentation...\")\n config = load_config(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n theme_dir=theme_dir,\n site_dir=site_dir,\n **kwargs\n )\n # Override a few config settings after validation\n config['site_url'] = 'http://{}/'.format(config['dev_addr'])\n\n live_server = livereload in ['dirty', 'livereload']\n dirty = livereload == 'dirty'\n build(config, live_server=live_server, dirty=dirty)\n return config\n\n try:\n # Perform the initial build\n config = builder()\n\n host, port = config['dev_addr']\n\n server = LiveReloadServer(builder=builder, host=host, port=port, root=site_dir)\n\n def error_handler(code):\n if code in (404, 500):\n error_page = join(site_dir, f'{code}.html')\n if isfile(error_page):\n with open(error_page, 'rb') as f:\n return f.read()\n\n server.error_handler = error_handler\n\n if livereload in ['livereload', 'dirty']:\n # Watch the documentation files, the config file and the theme files.\n server.watch(config['docs_dir'])\n server.watch(config['config_file_path'])\n\n if watch_theme:\n for d in config['theme'].dirs:\n server.watch(d)\n\n # Run `serve` plugin events.\n server = config['plugins'].run_event('serve', server, config=config, builder=builder)\n\n try:\n server.serve()\n except KeyboardInterrupt:\n log.info(\"Shutting down...\")\n finally:\n server.shutdown()\n except OSError as e: # pragma: no cover\n # Avoid ugly, unhelpful traceback\n raise Abort(str(e))\n finally:\n shutil.rmtree(site_dir)\n", "path": "mkdocs/commands/serve.py"}]}
num_tokens_prompt: 1,455
num_tokens_diff: 159
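The golden diff above guards the cleanup with an existence check. A small check-then-act window still remains between `isdir` and `rmtree`; `shutil.rmtree(site_dir, ignore_errors=True)` would close it entirely, but the sketch below mirrors the shape the diff chose.

```python
import shutil
import tempfile
from os.path import isdir

site_dir = tempfile.mkdtemp(prefix="mkdocs_")
try:
    pass  # build/serve work would happen here
finally:
    # Another process (or an earlier cleanup) may have removed the
    # directory already; only attempt deletion when it still exists.
    if isdir(site_dir):
        shutil.rmtree(site_dir)
```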
problem_id: gh_patches_debug_12053
source: rasdani/github-patches
task_type: git_diff
in_source_id: lnbits__lnbits-215
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No success message and URL returned with LNURLp links As the title says, the neither the URL, nor the message are returned to the wallet making the payment. This used to work when I was still on the [latest `raspiblitz` tag](https://github.com/lnbits/lnbits/releases/tag/raspiblitz). Now on [this commit](https://github.com/lnbits/lnbits/commit/7ab4553ef5083f6746bd6fe747775ba57e2e54a4) (the issue could have appeared during any of the commits in between), it has stopped working, even on newly created LNURLp links. Edit: Reproducible on lnbits.com </issue> <code> [start of lnbits/extensions/lnurlp/lnurl.py] 1 import hashlib 2 import math 3 from http import HTTPStatus 4 from quart import jsonify, url_for, request 5 from lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse # type: ignore 6 7 from lnbits.core.services import create_invoice 8 from lnbits.utils.exchange_rates import get_fiat_rate_satoshis 9 10 from . import lnurlp_ext 11 from .crud import increment_pay_link 12 13 14 @lnurlp_ext.route("/api/v1/lnurl/<link_id>", methods=["GET"]) 15 async def api_lnurl_response(link_id): 16 link = await increment_pay_link(link_id, served_meta=1) 17 if not link: 18 return ( 19 jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}), 20 HTTPStatus.OK, 21 ) 22 23 rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1 24 resp = LnurlPayResponse( 25 callback=url_for("lnurlp.api_lnurl_callback", link_id=link.id, _external=True), 26 min_sendable=math.ceil(link.min * rate) * 1000, 27 max_sendable=round(link.max * rate) * 1000, 28 metadata=link.lnurlpay_metadata, 29 ) 30 params = resp.dict() 31 32 if link.comment_chars > 0: 33 params["commentAllowed"] = link.comment_chars 34 35 return jsonify(params), HTTPStatus.OK 36 37 38 @lnurlp_ext.route("/api/v1/lnurl/cb/<link_id>", methods=["GET"]) 39 async def api_lnurl_callback(link_id): 40 link = await increment_pay_link(link_id, served_pr=1) 41 if not link: 42 return ( 43 jsonify({"status": "ERROR", "reason": "LNURL-pay not found."}), 44 HTTPStatus.OK, 45 ) 46 47 min, max = link.min, link.max 48 rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1 49 if link.currency: 50 # allow some fluctuation (as the fiat price may have changed between the calls) 51 min = rate * 995 * link.min 52 max = rate * 1010 * link.max 53 else: 54 min = link.min * 1000 55 max = link.max * 1000 56 57 amount_received = int(request.args.get("amount") or 0) 58 if amount_received < min: 59 return ( 60 jsonify( 61 LnurlErrorResponse( 62 reason=f"Amount {amount_received} is smaller than minimum {min}." 63 ).dict() 64 ), 65 HTTPStatus.OK, 66 ) 67 elif amount_received > max: 68 return ( 69 jsonify( 70 LnurlErrorResponse( 71 reason=f"Amount {amount_received} is greater than maximum {max}." 
72 ).dict() 73 ), 74 HTTPStatus.OK, 75 ) 76 77 comment = request.args.get("comment") 78 if len(comment or "") > link.comment_chars: 79 return ( 80 jsonify( 81 LnurlErrorResponse( 82 reason=f"Got a comment with {len(comment)} characters, but can only accept {link.comment_chars}" 83 ).dict() 84 ), 85 HTTPStatus.OK, 86 ) 87 88 payment_hash, payment_request = await create_invoice( 89 wallet_id=link.wallet, 90 amount=int(amount_received / 1000), 91 memo=link.description, 92 description_hash=hashlib.sha256( 93 link.lnurlpay_metadata.encode("utf-8") 94 ).digest(), 95 extra={"tag": "lnurlp", "link": link.id, "comment": comment}, 96 ) 97 98 resp = { 99 "routes": [], 100 "pr": payment_request, 101 } 102 103 success_action = link.success_action(payment_hash) 104 if success_action: 105 resp["success_action"] = success_action 106 107 return jsonify(resp), HTTPStatus.OK 108 [end of lnbits/extensions/lnurlp/lnurl.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lnbits/extensions/lnurlp/lnurl.py b/lnbits/extensions/lnurlp/lnurl.py --- a/lnbits/extensions/lnurlp/lnurl.py +++ b/lnbits/extensions/lnurlp/lnurl.py @@ -95,13 +95,17 @@ extra={"tag": "lnurlp", "link": link.id, "comment": comment}, ) - resp = { - "routes": [], - "pr": payment_request, - } - success_action = link.success_action(payment_hash) if success_action: - resp["success_action"] = success_action + resp = LnurlPayActionResponse( + pr=payment_request, + success_action=success_action, + routes=[], + ) + else: + resp = LnurlPayActionResponse( + pr=payment_request, + routes=[], + ) - return jsonify(resp), HTTPStatus.OK + return jsonify(resp.dict()), HTTPStatus.OK
{"golden_diff": "diff --git a/lnbits/extensions/lnurlp/lnurl.py b/lnbits/extensions/lnurlp/lnurl.py\n--- a/lnbits/extensions/lnurlp/lnurl.py\n+++ b/lnbits/extensions/lnurlp/lnurl.py\n@@ -95,13 +95,17 @@\n extra={\"tag\": \"lnurlp\", \"link\": link.id, \"comment\": comment},\n )\n \n- resp = {\n- \"routes\": [],\n- \"pr\": payment_request,\n- }\n-\n success_action = link.success_action(payment_hash)\n if success_action:\n- resp[\"success_action\"] = success_action\n+ resp = LnurlPayActionResponse(\n+ pr=payment_request,\n+ success_action=success_action,\n+ routes=[],\n+ )\n+ else:\n+ resp = LnurlPayActionResponse(\n+ pr=payment_request,\n+ routes=[],\n+ )\n \n- return jsonify(resp), HTTPStatus.OK\n+ return jsonify(resp.dict()), HTTPStatus.OK\n", "issue": "No success message and URL returned with LNURLp links\nAs the title says, the neither the URL, nor the message are returned to the wallet making the payment. This used to work when I was still on the [latest `raspiblitz` tag](https://github.com/lnbits/lnbits/releases/tag/raspiblitz). Now on [this commit](https://github.com/lnbits/lnbits/commit/7ab4553ef5083f6746bd6fe747775ba57e2e54a4) (the issue could have appeared during any of the commits in between), it has stopped working, even on newly created LNURLp links.\r\nEdit: Reproducible on lnbits.com\n", "before_files": [{"content": "import hashlib\nimport math\nfrom http import HTTPStatus\nfrom quart import jsonify, url_for, request\nfrom lnurl import LnurlPayResponse, LnurlPayActionResponse, LnurlErrorResponse # type: ignore\n\nfrom lnbits.core.services import create_invoice\nfrom lnbits.utils.exchange_rates import get_fiat_rate_satoshis\n\nfrom . import lnurlp_ext\nfrom .crud import increment_pay_link\n\n\n@lnurlp_ext.route(\"/api/v1/lnurl/<link_id>\", methods=[\"GET\"])\nasync def api_lnurl_response(link_id):\n link = await increment_pay_link(link_id, served_meta=1)\n if not link:\n return (\n jsonify({\"status\": \"ERROR\", \"reason\": \"LNURL-pay not found.\"}),\n HTTPStatus.OK,\n )\n\n rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1\n resp = LnurlPayResponse(\n callback=url_for(\"lnurlp.api_lnurl_callback\", link_id=link.id, _external=True),\n min_sendable=math.ceil(link.min * rate) * 1000,\n max_sendable=round(link.max * rate) * 1000,\n metadata=link.lnurlpay_metadata,\n )\n params = resp.dict()\n\n if link.comment_chars > 0:\n params[\"commentAllowed\"] = link.comment_chars\n\n return jsonify(params), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/lnurl/cb/<link_id>\", methods=[\"GET\"])\nasync def api_lnurl_callback(link_id):\n link = await increment_pay_link(link_id, served_pr=1)\n if not link:\n return (\n jsonify({\"status\": \"ERROR\", \"reason\": \"LNURL-pay not found.\"}),\n HTTPStatus.OK,\n )\n\n min, max = link.min, link.max\n rate = await get_fiat_rate_satoshis(link.currency) if link.currency else 1\n if link.currency:\n # allow some fluctuation (as the fiat price may have changed between the calls)\n min = rate * 995 * link.min\n max = rate * 1010 * link.max\n else:\n min = link.min * 1000\n max = link.max * 1000\n\n amount_received = int(request.args.get(\"amount\") or 0)\n if amount_received < min:\n return (\n jsonify(\n LnurlErrorResponse(\n reason=f\"Amount {amount_received} is smaller than minimum {min}.\"\n ).dict()\n ),\n HTTPStatus.OK,\n )\n elif amount_received > max:\n return (\n jsonify(\n LnurlErrorResponse(\n reason=f\"Amount {amount_received} is greater than maximum {max}.\"\n ).dict()\n ),\n HTTPStatus.OK,\n )\n\n comment = 
request.args.get(\"comment\")\n if len(comment or \"\") > link.comment_chars:\n return (\n jsonify(\n LnurlErrorResponse(\n reason=f\"Got a comment with {len(comment)} characters, but can only accept {link.comment_chars}\"\n ).dict()\n ),\n HTTPStatus.OK,\n )\n\n payment_hash, payment_request = await create_invoice(\n wallet_id=link.wallet,\n amount=int(amount_received / 1000),\n memo=link.description,\n description_hash=hashlib.sha256(\n link.lnurlpay_metadata.encode(\"utf-8\")\n ).digest(),\n extra={\"tag\": \"lnurlp\", \"link\": link.id, \"comment\": comment},\n )\n\n resp = {\n \"routes\": [],\n \"pr\": payment_request,\n }\n\n success_action = link.success_action(payment_hash)\n if success_action:\n resp[\"success_action\"] = success_action\n\n return jsonify(resp), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/lnurl.py"}]}
num_tokens_prompt: 1,755
num_tokens_diff: 232
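A likely explanation for the bug in this record: the hand-built dict used the Python-style key `success_action`, while LNURL wallets look for the camelCase `successAction` that `LnurlPayActionResponse.dict()` emits. Below is a pydantic v1-style sketch of that aliasing; `PayActionResponse` is a simplified stand-in, and the real `lnurl` package defines stricter field types.

```python
from typing import Optional

from pydantic import BaseModel, Field


class PayActionResponse(BaseModel):
    pr: str
    routes: list = []
    # Wallets read the camelCase key; the alias does the renaming that
    # the old hand-built dict skipped.
    success_action: Optional[dict] = Field(None, alias="successAction")

    class Config:
        allow_population_by_field_name = True


resp = PayActionResponse(
    pr="lnbc1...",
    success_action={"tag": "message", "message": "Thanks!"},
)
print(resp.dict(by_alias=True, exclude_none=True))
# {'pr': 'lnbc1...', 'routes': [], 'successAction': {'tag': 'message', ...}}
```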
problem_id: gh_patches_debug_2997
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-20554
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rfftn </issue> <code> [start of ivy/functional/frontends/scipy/fft/fft.py] 1 # global 2 import ivy 3 from ivy.functional.frontends.scipy.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 ) 6 7 8 # fft 9 @to_ivy_arrays_and_back 10 def fft(x, n=None, axis=-1, norm="backward", overwrite_x=False): 11 return ivy.fft(x, axis, norm=norm, n=n) 12 13 14 # ifft 15 @to_ivy_arrays_and_back 16 def ifft(x, n=None, axis=-1, norm="backward", overwrite_x=False): 17 return ivy.ifft(x, axis, norm=norm, n=n) 18 19 20 # dct 21 @to_ivy_arrays_and_back 22 def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None): 23 return ivy.dct(x, type=type, n=n, axis=axis, norm=norm) 24 25 26 # idct 27 @to_ivy_arrays_and_back 28 def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None): 29 inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type] 30 return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm) 31 32 33 @to_ivy_arrays_and_back 34 def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False): 35 return ivy.fft2(x, s=s, dim=axes, norm=norm) 36 37 38 @to_ivy_arrays_and_back 39 def ifftn( 40 x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None 41 ): 42 return ivy.ifftn(x, s=s, dim=axes, norm=norm) 43 [end of ivy/functional/frontends/scipy/fft/fft.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/scipy/fft/fft.py b/ivy/functional/frontends/scipy/fft/fft.py --- a/ivy/functional/frontends/scipy/fft/fft.py +++ b/ivy/functional/frontends/scipy/fft/fft.py @@ -40,3 +40,10 @@ x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None ): return ivy.ifftn(x, s=s, dim=axes, norm=norm) + + +@to_ivy_arrays_and_back +def rfftn( + x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None +): + return ivy.rfftn(x, s=s, dim=axes, norm=norm)
{"golden_diff": "diff --git a/ivy/functional/frontends/scipy/fft/fft.py b/ivy/functional/frontends/scipy/fft/fft.py\n--- a/ivy/functional/frontends/scipy/fft/fft.py\n+++ b/ivy/functional/frontends/scipy/fft/fft.py\n@@ -40,3 +40,10 @@\n x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None\n ):\n return ivy.ifftn(x, s=s, dim=axes, norm=norm)\n+\n+\n+@to_ivy_arrays_and_back\n+def rfftn(\n+ x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None\n+):\n+ return ivy.rfftn(x, s=s, dim=axes, norm=norm)\n", "issue": "rfftn\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.scipy.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# fft\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1, norm=\"backward\", overwrite_x=False):\n return ivy.fft(x, axis, norm=norm, n=n)\n\n\n# ifft\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1, norm=\"backward\", overwrite_x=False):\n return ivy.ifft(x, axis, norm=norm, n=n)\n\n\n# dct\n@to_ivy_arrays_and_back\ndef dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):\n return ivy.dct(x, type=type, n=n, axis=axis, norm=norm)\n\n\n# idct\n@to_ivy_arrays_and_back\ndef idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):\n inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]\n return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False):\n return ivy.fft2(x, s=s, dim=axes, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef ifftn(\n x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None\n):\n return ivy.ifftn(x, s=s, dim=axes, norm=norm)\n", "path": "ivy/functional/frontends/scipy/fft/fft.py"}]}
num_tokens_prompt: 1,027
num_tokens_diff: 186
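The new frontend function in this diff is a thin shim: it accepts SciPy's full `rfftn` signature but forwards only the arguments the backend understands, silently ignoring `overwrite_x`, `workers`, and `plan`. The same shape, sketched against NumPy so it runs standalone (the Ivy frontend routes to `ivy.rfftn` instead):

```python
import numpy as np


def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False,
          workers=None, *, plan=None):
    # SciPy-only knobs (overwrite_x, workers, plan) are accepted for
    # signature compatibility but deliberately unused.
    return np.fft.rfftn(x, s=s, axes=axes, norm=norm)


x = np.random.rand(4, 6)
assert np.allclose(rfftn(x), np.fft.rfftn(x))
print(rfftn(x).shape)  # (4, 4): the last axis of a real FFT is n//2 + 1
```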
problem_id: gh_patches_debug_14237
source: rasdani/github-patches
task_type: git_diff
in_source_id: mne-tools__mne-python-10739
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> montage_sgskip.py example doesn't run When trying to run the example in `examples/visualization/montage_sgskip.py`, I get an exception: ```python ValueError: A head<->mri transformation matrix (trans) is required to plot head surfaces in head coordinates, `trans=None` is not allowed ``` It runs if I pass `trans='fsaverage'` but I'm not sure this is how this example intended to behave (as the BEM is based on a sphere here). @agramfort care to take a look? </issue> <code> [start of examples/visualization/montage_sgskip.py] 1 # -*- coding: utf-8 -*- 2 """ 3 .. _plot_montage: 4 5 Plotting sensor layouts of EEG systems 6 ====================================== 7 8 This example illustrates how to load all the EEG system montages 9 shipped in MNE-python, and display it on the fsaverage template subject. 10 """ # noqa: D205, D400 11 # Authors: Alexandre Gramfort <[email protected]> 12 # Joan Massich <[email protected]> 13 # 14 # License: BSD-3-Clause 15 16 # %% 17 18 import os.path as op 19 20 import mne 21 from mne.channels.montage import get_builtin_montages 22 from mne.datasets import fetch_fsaverage 23 from mne.viz import set_3d_title, set_3d_view 24 25 26 # %% 27 # Check all montages against a sphere 28 29 for current_montage in get_builtin_montages(): 30 montage = mne.channels.make_standard_montage(current_montage) 31 info = mne.create_info( 32 ch_names=montage.ch_names, sfreq=100., ch_types='eeg') 33 info.set_montage(montage) 34 sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info) 35 fig = mne.viz.plot_alignment( 36 # Plot options 37 show_axes=True, dig='fiducials', surfaces='head', 38 bem=sphere, info=info) 39 set_3d_view(figure=fig, azimuth=135, elevation=80) 40 set_3d_title(figure=fig, title=current_montage) 41 42 43 # %% 44 # Check all montages against fsaverage 45 46 subjects_dir = op.dirname(fetch_fsaverage()) 47 48 for current_montage in get_builtin_montages(): 49 montage = mne.channels.make_standard_montage(current_montage) 50 # Create dummy info 51 info = mne.create_info( 52 ch_names=montage.ch_names, sfreq=100., ch_types='eeg') 53 info.set_montage(montage) 54 fig = mne.viz.plot_alignment( 55 # Plot options 56 show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True, 57 subject='fsaverage', subjects_dir=subjects_dir, info=info, 58 coord_frame='mri', 59 trans='fsaverage', # transform from head coords to fsaverage's MRI 60 ) 61 set_3d_view(figure=fig, azimuth=135, elevation=80) 62 set_3d_title(figure=fig, title=current_montage) 63 [end of examples/visualization/montage_sgskip.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/visualization/montage_sgskip.py b/examples/visualization/montage_sgskip.py --- a/examples/visualization/montage_sgskip.py +++ b/examples/visualization/montage_sgskip.py @@ -16,6 +16,7 @@ # %% import os.path as op +import numpy as np import mne from mne.channels.montage import get_builtin_montages @@ -35,6 +36,7 @@ fig = mne.viz.plot_alignment( # Plot options show_axes=True, dig='fiducials', surfaces='head', + trans=mne.Transform("head", "mri", trans=np.eye(4)), # identity bem=sphere, info=info) set_3d_view(figure=fig, azimuth=135, elevation=80) set_3d_title(figure=fig, title=current_montage)
{"golden_diff": "diff --git a/examples/visualization/montage_sgskip.py b/examples/visualization/montage_sgskip.py\n--- a/examples/visualization/montage_sgskip.py\n+++ b/examples/visualization/montage_sgskip.py\n@@ -16,6 +16,7 @@\n # %%\n \n import os.path as op\n+import numpy as np\n \n import mne\n from mne.channels.montage import get_builtin_montages\n@@ -35,6 +36,7 @@\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head',\n+ trans=mne.Transform(\"head\", \"mri\", trans=np.eye(4)), # identity\n bem=sphere, info=info)\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n", "issue": "montage_sgskip.py example doesn't run\nWhen trying to run the example in `examples/visualization/montage_sgskip.py`, I get an exception:\r\n\r\n```python\r\nValueError: A head<->mri transformation matrix (trans) is required to plot head surfaces in head coordinates, `trans=None` is not allowed\r\n```\r\nIt runs if I pass `trans='fsaverage'` but I'm not sure this is how this example intended to behave (as the BEM is based on a sphere here).\r\n\r\n@agramfort care to take a look?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _plot_montage:\n\nPlotting sensor layouts of EEG systems\n======================================\n\nThis example illustrates how to load all the EEG system montages\nshipped in MNE-python, and display it on the fsaverage template subject.\n\"\"\" # noqa: D205, D400\n# Authors: Alexandre Gramfort <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\n\nimport mne\nfrom mne.channels.montage import get_builtin_montages\nfrom mne.datasets import fetch_fsaverage\nfrom mne.viz import set_3d_title, set_3d_view\n\n\n# %%\n# Check all montages against a sphere\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head',\n bem=sphere, info=info)\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n\n\n# %%\n# Check all montages against fsaverage\n\nsubjects_dir = op.dirname(fetch_fsaverage())\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n # Create dummy info\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,\n subject='fsaverage', subjects_dir=subjects_dir, info=info,\n coord_frame='mri',\n trans='fsaverage', # transform from head coords to fsaverage's MRI\n )\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n", "path": "examples/visualization/montage_sgskip.py"}]}
num_tokens_prompt: 1,343
num_tokens_diff: 207
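The fix above supplies an explicit identity head-to-MRI transform so `plot_alignment` stops demanding one: with a spherical conductor model there is no real MRI, so declaring the two frames identical is the honest choice, whereas `trans='fsaverage'` would pretend the sphere lives in fsaverage's anatomy. A minimal sketch of constructing it (requires `mne` and `numpy` installed); the object is then passed as `trans=identity_trans` to `plot_alignment`.

```python
import numpy as np

import mne

# Identity 4x4: head coordinates and "MRI" coordinates coincide, which
# is exactly right when the BEM is a sphere rather than a head MRI.
identity_trans = mne.Transform("head", "mri", trans=np.eye(4))
print(identity_trans)
```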
problem_id: gh_patches_debug_27163
source: rasdani/github-patches
task_type: git_diff
in_source_id: wemake-services__wemake-python-styleguide-844
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `if` after `else` in `try` raises false positive WPS513 # Bug report <!-- Hi, thanks for submitting a bug. We appreciate that. But, we will need some information about what's wrong to help you. --> ## What's wrong Code: ```python try: return_value = ast.literal_eval(node.body) except ValueError: # The value was not literal, it's ok. return else: if return_value in self._primitive_values: self.add_violation(ImplicitPrimitiveViolation(node)) ``` Output: ``` 329:13 WPS513 Found implicit `elif` condition if return_value in self._primitive_values: ``` <!-- Describe what is not working. Please, attach a traceback. --> ## How is that should be It should not raise this violation. It is not a part of `if` / `else` case. </issue> <code> [start of wemake_python_styleguide/visitors/tokenize/conditions.py] 1 # -*- coding: utf-8 -*- 2 3 import tokenize 4 from typing import ClassVar, FrozenSet 5 6 from typing_extensions import final 7 8 from wemake_python_styleguide.violations.refactoring import ( 9 ImplicitElifViolation, 10 ) 11 from wemake_python_styleguide.visitors.base import BaseTokenVisitor 12 13 14 @final 15 class IfElseVisitor(BaseTokenVisitor): 16 """ 17 Checks if tokens tokens. 18 19 We use ``tokenize`` instead of ``ast`` because 20 21 .. code:: python 22 23 if some: 24 ... 25 else: 26 if other: 27 ... 28 29 has the same ``ast`` representation as: 30 31 .. code:: python 32 33 if some: 34 ... 35 elif other: 36 ... 37 38 That's why we have to use ``tokenize`` to find 39 the raw tokens inside the text. 40 41 """ 42 43 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset(( 44 tokenize.NEWLINE, 45 tokenize.NL, 46 tokenize.COLON, 47 tokenize.INDENT, 48 )) 49 50 def visit_name(self, token: tokenize.TokenInfo) -> None: 51 """ 52 Checks that ``if`` nodes are defined correctly. 53 54 Raises: 55 ImplicitElifViolation 56 57 """ 58 self._check_implicit_elif(token) 59 60 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None: 61 if token.string != 'else': 62 return 63 64 index = self.file_tokens.index(token) 65 # There's a bug in coverage, I am not sure how to make it work. 66 for next_token in self.file_tokens[index + 1:]: # pragma: no cover 67 if next_token.exact_type in self._allowed_token_types: 68 continue 69 elif next_token.string == 'if': 70 self.add_violation(ImplicitElifViolation(next_token)) 71 return 72 [end of wemake_python_styleguide/visitors/tokenize/conditions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py --- a/wemake_python_styleguide/visitors/tokenize/conditions.py +++ b/wemake_python_styleguide/visitors/tokenize/conditions.py @@ -57,11 +57,36 @@ """ self._check_implicit_elif(token) + def _does_else_belong_to_if(self, start_index: int) -> bool: + previous_token = self.file_tokens[start_index - 1] + + if previous_token.type != tokenize.DEDENT: + # This is not the first token on the line, which means that it can + # also be "embedded" else: x if A else B + return False + + for token in reversed(self.file_tokens[:start_index - 1]): + if token.type != tokenize.NAME: + continue + + # Here we rely upon an intuition that in Python else have to be + # on the same level (same indentation) as parent statement. + if token.start[1] == previous_token.start[1]: + return token.string in {'if', 'elif'} + + return False + def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None: if token.string != 'else': return index = self.file_tokens.index(token) + + # `else` token can belong also to `for` and `try/except` statement, + # which can trigger false positive for that violation. + if not self._does_else_belong_to_if(index): + return + # There's a bug in coverage, I am not sure how to make it work. for next_token in self.file_tokens[index + 1:]: # pragma: no cover if next_token.exact_type in self._allowed_token_types:
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -57,11 +57,36 @@\n \"\"\"\n self._check_implicit_elif(token)\n \n+ def _does_else_belong_to_if(self, start_index: int) -> bool:\n+ previous_token = self.file_tokens[start_index - 1]\n+\n+ if previous_token.type != tokenize.DEDENT:\n+ # This is not the first token on the line, which means that it can\n+ # also be \"embedded\" else: x if A else B\n+ return False\n+\n+ for token in reversed(self.file_tokens[:start_index - 1]):\n+ if token.type != tokenize.NAME:\n+ continue\n+\n+ # Here we rely upon an intuition that in Python else have to be\n+ # on the same level (same indentation) as parent statement.\n+ if token.start[1] == previous_token.start[1]:\n+ return token.string in {'if', 'elif'}\n+\n+ return False\n+\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n \n index = self.file_tokens.index(token)\n+\n+ # `else` token can belong also to `for` and `try/except` statement,\n+ # which can trigger false positive for that violation.\n+ if not self._does_else_belong_to_if(index):\n+ return\n+\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n", "issue": "`if` after `else` in `try` raises false positive WPS513\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\nCode:\r\n\r\n```python\r\n try:\r\n return_value = ast.literal_eval(node.body)\r\n except ValueError:\r\n # The value was not literal, it's ok.\r\n return\r\n else:\r\n if return_value in self._primitive_values:\r\n self.add_violation(ImplicitPrimitiveViolation(node))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\n329:13 WPS513 Found implicit `elif` condition\r\n if return_value in self._primitive_values:\r\n```\r\n\r\n<!-- Describe what is not working. Please, attach a traceback. -->\r\n\r\n## How is that should be\r\n\r\nIt should not raise this violation. It is not a part of `if` / `else` case.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. 
code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]}
num_tokens_prompt: 1,295
num_tokens_diff: 427
problem_id: gh_patches_debug_19372
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-powertools__powertools-lambda-python-1534
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int ### Static type checker used mypy (project's standard) ### AWS Lambda function runtime 3.9 ### AWS Lambda Powertools for Python version latest ### Static type checker info ``` $ mypy repro.py repro.py:5: error: Argument "log_level" to "copy_config_to_registered_loggers" has incompatible type "int"; expected "Optional[str]" Found 1 error in 1 file (checked 1 source file) ``` ``` mypy --version mypy 0.971 (compiled: yes) ``` ### Code snippet ```python from aws_lambda_powertools.logging import utils from aws_lambda_powertools import Logger logger = Logger() utils.copy_config_to_registered_loggers(source_logger=logger, log_level=30) ``` ### Possible Solution Update signature to accept `Union[str, int]` </issue> <code> [start of aws_lambda_powertools/logging/utils.py] 1 import logging 2 from typing import Callable, List, Optional, Set, Union 3 4 from .logger import Logger 5 6 PACKAGE_LOGGER = "aws_lambda_powertools" 7 8 9 def copy_config_to_registered_loggers( 10 source_logger: Logger, 11 log_level: Optional[str] = None, 12 exclude: Optional[Set[str]] = None, 13 include: Optional[Set[str]] = None, 14 ) -> None: 15 16 """Copies source Logger level and handler to all registered loggers for consistent formatting. 17 18 Parameters 19 ---------- 20 source_logger : Logger 21 Powertools Logger to copy configuration from 22 log_level : str, optional 23 Logging level to set to registered loggers, by default uses source_logger logging level 24 include : Optional[Set[str]], optional 25 List of logger names to include, by default all registered loggers are included 26 exclude : Optional[Set[str]], optional 27 List of logger names to exclude, by default None 28 """ 29 level = log_level or source_logger.level 30 31 # Assumptions: Only take parent loggers not children (dot notation rule) 32 # Steps: 33 # 1. Default operation: Include all registered loggers 34 # 2. Only include set? Only add Loggers in the list and ignore all else 35 # 3. Include and exclude set? Add Logger if it’s in include and not in exclude 36 # 4. Only exclude set? Ignore Logger in the excluding list 37 38 # Exclude source and powertools package logger by default 39 # If source logger is a child ensure we exclude parent logger to not break child logger 40 # from receiving/pushing updates to keys being added/removed 41 source_logger_name = source_logger.name.split(".")[0] 42 43 if exclude: 44 exclude.update([source_logger_name, PACKAGE_LOGGER]) 45 else: 46 exclude = {source_logger_name, PACKAGE_LOGGER} 47 48 # Prepare loggers set 49 if include: 50 loggers = include.difference(exclude) 51 filter_func = _include_registered_loggers_filter 52 else: 53 loggers = exclude 54 filter_func = _exclude_registered_loggers_filter 55 56 registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func) 57 for logger in registered_loggers: 58 _configure_logger(source_logger, logger, level) 59 60 61 def _include_registered_loggers_filter(loggers: Set[str]): 62 return [logging.getLogger(name) for name in logging.root.manager.loggerDict if "." not in name and name in loggers] 63 64 65 def _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]: 66 return [ 67 logging.getLogger(name) for name in logging.root.manager.loggerDict if "." 
not in name and name not in loggers 68 ] 69 70 71 def _find_registered_loggers( 72 source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]] 73 ) -> List[logging.Logger]: 74 """Filter root loggers based on provided parameters.""" 75 root_loggers = filter_func(loggers) 76 source_logger.debug(f"Filtered root loggers: {root_loggers}") 77 return root_loggers 78 79 80 def _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None: 81 logger.handlers = [] 82 logger.setLevel(level) 83 logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073 84 source_logger.debug(f"Logger {logger} reconfigured to use logging level {level}") 85 for source_handler in source_logger.handlers: 86 logger.addHandler(source_handler) 87 source_logger.debug(f"Logger {logger} reconfigured to use {source_handler}") 88 [end of aws_lambda_powertools/logging/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py --- a/aws_lambda_powertools/logging/utils.py +++ b/aws_lambda_powertools/logging/utils.py @@ -8,7 +8,7 @@ def copy_config_to_registered_loggers( source_logger: Logger, - log_level: Optional[str] = None, + log_level: Optional[Union[int, str]] = None, exclude: Optional[Set[str]] = None, include: Optional[Set[str]] = None, ) -> None: @@ -19,7 +19,7 @@ ---------- source_logger : Logger Powertools Logger to copy configuration from - log_level : str, optional + log_level : Union[int, str], optional Logging level to set to registered loggers, by default uses source_logger logging level include : Optional[Set[str]], optional List of logger names to include, by default all registered loggers are included
{"golden_diff": "diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py\n--- a/aws_lambda_powertools/logging/utils.py\n+++ b/aws_lambda_powertools/logging/utils.py\n@@ -8,7 +8,7 @@\n \n def copy_config_to_registered_loggers(\n source_logger: Logger,\n- log_level: Optional[str] = None,\n+ log_level: Optional[Union[int, str]] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n ) -> None:\n@@ -19,7 +19,7 @@\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n- log_level : str, optional\n+ log_level : Union[int, str], optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n", "issue": "Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int\n### Static type checker used\r\n\r\nmypy (project's standard)\r\n\r\n### AWS Lambda function runtime\r\n\r\n3.9\r\n\r\n### AWS Lambda Powertools for Python version\r\n\r\nlatest\r\n\r\n### Static type checker info\r\n\r\n```\r\n$ mypy repro.py\r\nrepro.py:5: error: Argument \"log_level\" to \"copy_config_to_registered_loggers\" has incompatible type \"int\"; expected \"Optional[str]\"\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\n\r\n```\r\nmypy --version\r\nmypy 0.971 (compiled: yes)\r\n```\r\n\r\n### Code snippet\r\n\r\n```python\r\nfrom aws_lambda_powertools.logging import utils\r\nfrom aws_lambda_powertools import Logger\r\n\r\nlogger = Logger()\r\nutils.copy_config_to_registered_loggers(source_logger=logger, log_level=30)\r\n```\r\n\r\n\r\n### Possible Solution\r\n\r\nUpdate signature to accept `Union[str, int]`\n", "before_files": [{"content": "import logging\nfrom typing import Callable, List, Optional, Set, Union\n\nfrom .logger import Logger\n\nPACKAGE_LOGGER = \"aws_lambda_powertools\"\n\n\ndef copy_config_to_registered_loggers(\n source_logger: Logger,\n log_level: Optional[str] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n) -> None:\n\n \"\"\"Copies source Logger level and handler to all registered loggers for consistent formatting.\n\n Parameters\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n log_level : str, optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n exclude : Optional[Set[str]], optional\n List of logger names to exclude, by default None\n \"\"\"\n level = log_level or source_logger.level\n\n # Assumptions: Only take parent loggers not children (dot notation rule)\n # Steps:\n # 1. Default operation: Include all registered loggers\n # 2. Only include set? Only add Loggers in the list and ignore all else\n # 3. Include and exclude set? Add Logger if it\u2019s in include and not in exclude\n # 4. Only exclude set? 
Ignore Logger in the excluding list\n\n # Exclude source and powertools package logger by default\n # If source logger is a child ensure we exclude parent logger to not break child logger\n # from receiving/pushing updates to keys being added/removed\n source_logger_name = source_logger.name.split(\".\")[0]\n\n if exclude:\n exclude.update([source_logger_name, PACKAGE_LOGGER])\n else:\n exclude = {source_logger_name, PACKAGE_LOGGER}\n\n # Prepare loggers set\n if include:\n loggers = include.difference(exclude)\n filter_func = _include_registered_loggers_filter\n else:\n loggers = exclude\n filter_func = _exclude_registered_loggers_filter\n\n registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)\n for logger in registered_loggers:\n _configure_logger(source_logger, logger, level)\n\n\ndef _include_registered_loggers_filter(loggers: Set[str]):\n return [logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name in loggers]\n\n\ndef _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:\n return [\n logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name not in loggers\n ]\n\n\ndef _find_registered_loggers(\n source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]\n) -> List[logging.Logger]:\n \"\"\"Filter root loggers based on provided parameters.\"\"\"\n root_loggers = filter_func(loggers)\n source_logger.debug(f\"Filtered root loggers: {root_loggers}\")\n return root_loggers\n\n\ndef _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None:\n logger.handlers = []\n logger.setLevel(level)\n logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073\n source_logger.debug(f\"Logger {logger} reconfigured to use logging level {level}\")\n for source_handler in source_logger.handlers:\n logger.addHandler(source_handler)\n source_logger.debug(f\"Logger {logger} reconfigured to use {source_handler}\")\n", "path": "aws_lambda_powertools/logging/utils.py"}]}
num_tokens_prompt: 1,691
num_tokens_diff: 213
problem_id: gh_patches_debug_24756
source: rasdani/github-patches
task_type: git_diff
in_source_id: netbox-community__netbox-2290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> deficiency in new webhook implementation <!-- Before opening a new issue, please search through the existing issues to see if your topic has already been addressed. Note that you may need to remove the "is:open" filter from the search bar to include closed issues. Check the appropriate type for your issue below by placing an x between the brackets. For assistance with installation issues, or for any other issues other than those listed below, please raise your topic for discussion on our mailing list: https://groups.google.com/forum/#!forum/netbox-discuss Please note that issues which do not fall under any of the below categories will be closed. Due to an excessive backlog of feature requests, we are not currently accepting any proposals which extend NetBox's feature scope. Do not prepend any sort of tag to your issue's title. An administrator will review your issue and assign labels as appropriate. ---> ### Issue type [ ] Feature request <!-- An enhancement of existing functionality --> [X] Bug report <!-- Unexpected or erroneous behavior --> [ ] Documentation <!-- A modification to the documentation --> <!-- Please describe the environment in which you are running NetBox. (Be sure to verify that you are running the latest stable release of NetBox before submitting a bug report.) If you are submitting a bug report and have made any changes to the code base, please first validate that your bug can be recreated while running an official release. --> ### Environment * Python version: python 2.7.5 * NetBox version: develop-2.4 <!-- BUG REPORTS must include: * A list of the steps needed for someone else to reproduce the bug * A description of the expected and observed behavior * Any relevant error messages (screenshots may also help) FEATURE REQUESTS must include: * A detailed description of the proposed functionality * A use case for the new feature * A rough description of any necessary changes to the database schema * Any relevant third-party libraries which would be needed --> ### Description Testing out the webhook implementation and discovered the following bug: when a model contains a custom field of type date the worker is unable to serialize the data for transmission ``` Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/rq/worker.py", line 793, in perform_job rv = job.perform() File "/usr/lib/python2.7/site-packages/rq/job.py", line 599, in perform self._result = self._execute() File "/usr/lib/python2.7/site-packages/rq/job.py", line 605, in _execute return self.func(*self.args, **self.kwargs) File "/opt/netbox/netbox/extras/webhooks_worker.py", line 44, in process_webhook prepared_request = requests.Request(**params).prepare() File "/usr/lib/python2.7/site-packages/requests/models.py", line 259, in prepare hooks=self.hooks, File "/usr/lib/python2.7/site-packages/requests/models.py", line 307, in prepare self.prepare_body(data, files, json) File "/usr/lib/python2.7/site-packages/requests/models.py", line 427, in prepare_body body = json_dumps(json) File "/usr/lib64/python2.7/site-packages/simplejson/__init__.py", line 382, in dumps return _default_encoder.encode(obj) File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 291, in encode chunks = self.iterencode(o, _one_shot=True) File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 373, in iterencode return _iterencode(o, 0) File 
"/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 268, in default o.__class__.__name__) TypeError: Object of type date is not JSON serializable ``` </issue> <code> [start of netbox/extras/webhooks_worker.py] 1 import hashlib 2 import hmac 3 4 import requests 5 from django_rq import job 6 7 from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES 8 9 10 @job('default') 11 def process_webhook(webhook, data, model_class, event, timestamp): 12 """ 13 Make a POST request to the defined Webhook 14 """ 15 payload = { 16 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event], 17 'timestamp': timestamp, 18 'model': model_class.__name__, 19 'data': data 20 } 21 headers = { 22 'Content-Type': webhook.get_http_content_type_display(), 23 } 24 params = { 25 'method': 'POST', 26 'url': webhook.payload_url, 27 'headers': headers 28 } 29 30 if webhook.http_content_type == WEBHOOK_CT_JSON: 31 params.update({'json': payload}) 32 elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED: 33 params.update({'data': payload}) 34 35 prepared_request = requests.Request(**params).prepare() 36 37 if webhook.secret != '': 38 # sign the request with the secret 39 hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, digestmod=hashlib.sha512) 40 prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest() 41 42 with requests.Session() as session: 43 session.verify = webhook.ssl_verification 44 response = session.send(prepared_request) 45 46 if response.status_code >= 200 and response.status_code <= 299: 47 return 'Status {} returned, webhook successfully processed.'.format(response.status_code) 48 else: 49 raise requests.exceptions.RequestException( 50 "Status {} returned, webhook FAILED to process.".format(response.status_code) 51 ) 52 [end of netbox/extras/webhooks_worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py --- a/netbox/extras/webhooks_worker.py +++ b/netbox/extras/webhooks_worker.py @@ -1,8 +1,10 @@ import hashlib import hmac - import requests +import json + from django_rq import job +from rest_framework.utils.encoders import JSONEncoder from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES @@ -13,9 +15,9 @@ Make a POST request to the defined Webhook """ payload = { - 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event], + 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(), 'timestamp': timestamp, - 'model': model_class.__name__, + 'model': model_class._meta.model_name, 'data': data } headers = { @@ -28,7 +30,7 @@ } if webhook.http_content_type == WEBHOOK_CT_JSON: - params.update({'json': payload}) + params.update({'data': json.dumps(payload, cls=JSONEncoder)}) elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED: params.update({'data': payload})
{"golden_diff": "diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py\n--- a/netbox/extras/webhooks_worker.py\n+++ b/netbox/extras/webhooks_worker.py\n@@ -1,8 +1,10 @@\n import hashlib\n import hmac\n-\n import requests\n+import json\n+\n from django_rq import job\n+from rest_framework.utils.encoders import JSONEncoder\n \n from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n \n@@ -13,9 +15,9 @@\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n- 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n+ 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(),\n 'timestamp': timestamp,\n- 'model': model_class.__name__,\n+ 'model': model_class._meta.model_name,\n 'data': data\n }\n headers = {\n@@ -28,7 +30,7 @@\n }\n \n if webhook.http_content_type == WEBHOOK_CT_JSON:\n- params.update({'json': payload})\n+ params.update({'data': json.dumps(payload, cls=JSONEncoder)})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n", "issue": "deficiency in new webhook implementation\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[ ] Feature request <!-- An enhancement of existing functionality -->\r\n[X] Bug report <!-- Unexpected or erroneous behavior -->\r\n[ ] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: python 2.7.5\r\n* NetBox version: develop-2.4\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\nTesting out the webhook implementation and discovered the following bug:\r\n when a model contains a custom field of type date the worker is unable to serialize the data for transmission\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/site-packages/rq/worker.py\", line 793, in perform_job\r\n rv = job.perform()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 599, in perform\r\n self._result = self._execute()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 605, in _execute\r\n return self.func(*self.args, **self.kwargs)\r\n File \"/opt/netbox/netbox/extras/webhooks_worker.py\", line 44, in process_webhook\r\n prepared_request = requests.Request(**params).prepare()\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 259, in prepare\r\n hooks=self.hooks,\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 307, in prepare\r\n self.prepare_body(data, files, json)\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 427, in prepare_body\r\n body = json_dumps(json)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/__init__.py\", line 382, in dumps\r\n return _default_encoder.encode(obj)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 291, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 373, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 268, in default\r\n o.__class__.__name__)\r\nTypeError: Object of type date is not JSON serializable\r\n```\n", "before_files": [{"content": "import hashlib\nimport hmac\n\nimport requests\nfrom django_rq import job\n\nfrom extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n\n\n@job('default')\ndef process_webhook(webhook, data, model_class, event, timestamp):\n \"\"\"\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n 'timestamp': timestamp,\n 'model': model_class.__name__,\n 'data': data\n }\n headers = {\n 'Content-Type': webhook.get_http_content_type_display(),\n }\n params = {\n 'method': 'POST',\n 'url': webhook.payload_url,\n 'headers': headers\n }\n\n if webhook.http_content_type == WEBHOOK_CT_JSON:\n params.update({'json': payload})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n\n prepared_request = requests.Request(**params).prepare()\n\n if webhook.secret != '':\n # sign the request with the secret\n hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, 
digestmod=hashlib.sha512)\n prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest()\n\n with requests.Session() as session:\n session.verify = webhook.ssl_verification\n response = session.send(prepared_request)\n\n if response.status_code >= 200 and response.status_code <= 299:\n return 'Status {} returned, webhook successfully processed.'.format(response.status_code)\n else:\n raise requests.exceptions.RequestException(\n \"Status {} returned, webhook FAILED to process.\".format(response.status_code)\n )\n", "path": "netbox/extras/webhooks_worker.py"}]}
num_tokens_prompt: 1,917
num_tokens_diff: 288
problem_id: gh_patches_debug_31702
source: rasdani/github-patches
task_type: git_diff
in_source_id: napari__napari-6821
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test vectors # Description This PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality. One question is - should the input parameters be `edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni? ## Type of change <!-- Please delete options that are not relevant. --> - [x] Bug-fix (non-breaking change which fixes an issue) # How has this been tested? <!-- Please describe the tests that you ran to verify your changes. --> - [x] adds `napari/layers/vectors/tests/test_vectors.py` ## Final checklist: - [x] My PR is the minimum possible work for the desired functionality - [x] I have commented my code, particularly in hard-to-understand areas - [x] I have made corresponding changes to the documentation - [x] I have added tests that prove my fix is effective or that my feature works </issue> <code> [start of napari/_app_model/actions/_view_actions.py] 1 """Actions related to the 'View' menu that do not require Qt. 2 3 View actions that do require Qt should go in 4 `napari/_qt/_qapp_model/qactions/_view.py`. 5 """ 6 7 from app_model.types import Action, ToggleRule 8 9 from napari._app_model.actions._toggle_action import ViewerToggleAction 10 from napari._app_model.constants import CommandId, MenuGroup, MenuId 11 from napari.settings import get_settings 12 13 VIEW_ACTIONS: list[Action] = [] 14 MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR} 15 16 for cmd, viewer_attr, sub_attr in ( 17 (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'), 18 (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'), 19 (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'), 20 (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'), 21 (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'), 22 (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'), 23 (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'), 24 (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'), 25 ): 26 VIEW_ACTIONS.append( 27 ViewerToggleAction( 28 id=cmd, 29 title=cmd.command_title, 30 viewer_attribute=viewer_attr, 31 sub_attribute=sub_attr, 32 menus=[{'id': MENUID_DICT[viewer_attr]}], 33 ) 34 ) 35 36 37 def _tooltip_visibility_toggle() -> None: 38 settings = get_settings().appearance 39 settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility 40 41 42 def _get_current_tooltip_visibility() -> bool: 43 return get_settings().appearance.layer_tooltip_visibility 44 45 46 VIEW_ACTIONS.extend( 47 [ 48 # TODO: this could be made into a toggle setting Action subclass 49 # using a similar pattern to the above ViewerToggleAction classes 50 Action( 51 id=CommandId.TOGGLE_LAYER_TOOLTIPS, 52 title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title, 53 menus=[ 54 { 55 'id': MenuId.MENUBAR_VIEW, 56 'group': MenuGroup.RENDER, 57 'order': 10, 58 } 59 ], 60 
callback=_tooltip_visibility_toggle, 61 toggled=ToggleRule(get_current=_get_current_tooltip_visibility), 62 ), 63 ] 64 ) 65 [end of napari/_app_model/actions/_view_actions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py deleted file mode 100644 --- a/napari/_app_model/actions/_view_actions.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Actions related to the 'View' menu that do not require Qt. - -View actions that do require Qt should go in -`napari/_qt/_qapp_model/qactions/_view.py`. -""" - -from app_model.types import Action, ToggleRule - -from napari._app_model.actions._toggle_action import ViewerToggleAction -from napari._app_model.constants import CommandId, MenuGroup, MenuId -from napari.settings import get_settings - -VIEW_ACTIONS: list[Action] = [] -MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR} - -for cmd, viewer_attr, sub_attr in ( - (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'), - (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'), - (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'), - (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'), - (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'), - (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'), -): - VIEW_ACTIONS.append( - ViewerToggleAction( - id=cmd, - title=cmd.command_title, - viewer_attribute=viewer_attr, - sub_attribute=sub_attr, - menus=[{'id': MENUID_DICT[viewer_attr]}], - ) - ) - - -def _tooltip_visibility_toggle() -> None: - settings = get_settings().appearance - settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility - - -def _get_current_tooltip_visibility() -> bool: - return get_settings().appearance.layer_tooltip_visibility - - -VIEW_ACTIONS.extend( - [ - # TODO: this could be made into a toggle setting Action subclass - # using a similar pattern to the above ViewerToggleAction classes - Action( - id=CommandId.TOGGLE_LAYER_TOOLTIPS, - title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title, - menus=[ - { - 'id': MenuId.MENUBAR_VIEW, - 'group': MenuGroup.RENDER, - 'order': 10, - } - ], - callback=_tooltip_visibility_toggle, - toggled=ToggleRule(get_current=_get_current_tooltip_visibility), - ), - ] -)
{"golden_diff": "diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py\ndeleted file mode 100644\n--- a/napari/_app_model/actions/_view_actions.py\n+++ /dev/null\n@@ -1,64 +0,0 @@\n-\"\"\"Actions related to the 'View' menu that do not require Qt.\n-\n-View actions that do require Qt should go in\n-`napari/_qt/_qapp_model/qactions/_view.py`.\n-\"\"\"\n-\n-from app_model.types import Action, ToggleRule\n-\n-from napari._app_model.actions._toggle_action import ViewerToggleAction\n-from napari._app_model.constants import CommandId, MenuGroup, MenuId\n-from napari.settings import get_settings\n-\n-VIEW_ACTIONS: list[Action] = []\n-MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n-\n-for cmd, viewer_attr, sub_attr in (\n- (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n- (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n- (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n- (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n- (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n-):\n- VIEW_ACTIONS.append(\n- ViewerToggleAction(\n- id=cmd,\n- title=cmd.command_title,\n- viewer_attribute=viewer_attr,\n- sub_attribute=sub_attr,\n- menus=[{'id': MENUID_DICT[viewer_attr]}],\n- )\n- )\n-\n-\n-def _tooltip_visibility_toggle() -> None:\n- settings = get_settings().appearance\n- settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n-\n-\n-def _get_current_tooltip_visibility() -> bool:\n- return get_settings().appearance.layer_tooltip_visibility\n-\n-\n-VIEW_ACTIONS.extend(\n- [\n- # TODO: this could be made into a toggle setting Action subclass\n- # using a similar pattern to the above ViewerToggleAction classes\n- Action(\n- id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n- title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n- menus=[\n- {\n- 'id': MenuId.MENUBAR_VIEW,\n- 'group': MenuGroup.RENDER,\n- 'order': 10,\n- }\n- ],\n- callback=_tooltip_visibility_toggle,\n- toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n- ),\n- ]\n-)\n", "issue": "Test vectors\n# Description\r\nThis PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality.\r\n\r\nOne question is - should the input parameters be \r\n`edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni?\r\n \r\n## Type of change\r\n<!-- Please delete options that are not relevant. -->\r\n- [x] Bug-fix (non-breaking change which fixes an issue)\r\n\r\n# How has this been tested?\r\n<!-- Please describe the tests that you ran to verify your changes. 
-->\r\n- [x] adds `napari/layers/vectors/tests/test_vectors.py`\r\n\r\n## Final checklist:\r\n- [x] My PR is the minimum possible work for the desired functionality\r\n- [x] I have commented my code, particularly in hard-to-understand areas\r\n- [x] I have made corresponding changes to the documentation\r\n- [x] I have added tests that prove my fix is effective or that my feature works\r\n\n", "before_files": [{"content": "\"\"\"Actions related to the 'View' menu that do not require Qt.\n\nView actions that do require Qt should go in\n`napari/_qt/_qapp_model/qactions/_view.py`.\n\"\"\"\n\nfrom app_model.types import Action, ToggleRule\n\nfrom napari._app_model.actions._toggle_action import ViewerToggleAction\nfrom napari._app_model.constants import CommandId, MenuGroup, MenuId\nfrom napari.settings import get_settings\n\nVIEW_ACTIONS: list[Action] = []\nMENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n\nfor cmd, viewer_attr, sub_attr in (\n (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n):\n VIEW_ACTIONS.append(\n ViewerToggleAction(\n id=cmd,\n title=cmd.command_title,\n viewer_attribute=viewer_attr,\n sub_attribute=sub_attr,\n menus=[{'id': MENUID_DICT[viewer_attr]}],\n )\n )\n\n\ndef _tooltip_visibility_toggle() -> None:\n settings = get_settings().appearance\n settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n\n\ndef _get_current_tooltip_visibility() -> bool:\n return get_settings().appearance.layer_tooltip_visibility\n\n\nVIEW_ACTIONS.extend(\n [\n # TODO: this could be made into a toggle setting Action subclass\n # using a similar pattern to the above ViewerToggleAction classes\n Action(\n id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n menus=[\n {\n 'id': MenuId.MENUBAR_VIEW,\n 'group': MenuGroup.RENDER,\n 'order': 10,\n }\n ],\n callback=_tooltip_visibility_toggle,\n toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n ),\n ]\n)\n", "path": "napari/_app_model/actions/_view_actions.py"}]}
num_tokens_prompt: 1,549
num_tokens_diff: 661
problem_id: gh_patches_debug_10596
source: rasdani/github-patches
task_type: git_diff
in_source_id: xonsh__xonsh-1630
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Autocomplete: don't display full path Using `prompt_kit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot). The proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory. ![screenshot from 2016-05-19 10-33-09](https://cloud.githubusercontent.com/assets/14598/15397392/1170b2b8-1dae-11e6-85b4-ea31504f9346.png) </issue> <code> [start of xonsh/ptk/completer.py] 1 # -*- coding: utf-8 -*- 2 """Completer implementation to use with prompt_toolkit.""" 3 import os 4 import builtins 5 6 from prompt_toolkit.layout.dimension import LayoutDimension 7 from prompt_toolkit.completion import Completer, Completion 8 9 10 class PromptToolkitCompleter(Completer): 11 """Simple prompt_toolkit Completer object. 12 13 It just redirects requests to normal Xonsh completer. 14 """ 15 16 def __init__(self, completer, ctx): 17 """Takes instance of xonsh.completer.Completer and dict with context.""" 18 self.completer = completer 19 self.ctx = ctx 20 21 def get_completions(self, document, complete_event): 22 """Returns a generator for list of completions.""" 23 24 # Only generate completions when the user hits tab. 25 if complete_event.completion_requested: 26 if self.completer is None: 27 yield from [] 28 else: 29 line = document.current_line.lstrip() 30 endidx = document.cursor_position_col 31 begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0 32 prefix = line[begidx:endidx] 33 line = builtins.aliases.expand_alias(line) 34 completions, l = self.completer.complete(prefix, 35 line, 36 begidx, 37 endidx, 38 self.ctx) 39 if len(completions) <= 1: 40 pass 41 elif len(os.path.commonprefix(completions)) <= len(prefix): 42 self.reserve_space() 43 for comp in completions: 44 yield Completion(comp, -l) 45 46 def reserve_space(self): 47 cli = builtins.__xonsh_shell__.shell.prompter.cli 48 window = cli.application.layout.children[0].content.children[1] 49 50 if window and window.render_info: 51 h = window.render_info.content_height 52 r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS') 53 size = h + r 54 55 def comp_height(cli): 56 # If there is an autocompletion menu to be shown, make sure that o 57 # layout has at least a minimal height in order to display it. 58 if not cli.is_done: 59 return LayoutDimension(min=size) 60 else: 61 return LayoutDimension() 62 window._height = comp_height 63 [end of xonsh/ptk/completer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py --- a/xonsh/ptk/completer.py +++ b/xonsh/ptk/completer.py @@ -40,7 +40,11 @@ pass elif len(os.path.commonprefix(completions)) <= len(prefix): self.reserve_space() + prefix, _, compprefix = prefix.rpartition('.') for comp in completions: + if comp.rsplit('.', 1)[0] in prefix: + comp = comp.rsplit('.', 1)[-1] + l = len(compprefix) if compprefix in comp else 0 yield Completion(comp, -l) def reserve_space(self):
{"golden_diff": "diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py\n--- a/xonsh/ptk/completer.py\n+++ b/xonsh/ptk/completer.py\n@@ -40,7 +40,11 @@\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n+ prefix, _, compprefix = prefix.rpartition('.')\n for comp in completions:\n+ if comp.rsplit('.', 1)[0] in prefix:\n+ comp = comp.rsplit('.', 1)[-1]\n+ l = len(compprefix) if compprefix in comp else 0\n yield Completion(comp, -l)\n \n def reserve_space(self):\n", "issue": "Autocomplete: don't display full path\nUsing `prompt_kit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot).\n\nThe proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory.\n\n![screenshot from 2016-05-19 10-33-09](https://cloud.githubusercontent.com/assets/14598/15397392/1170b2b8-1dae-11e6-85b4-ea31504f9346.png)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.layout.dimension import LayoutDimension\nfrom prompt_toolkit.completion import Completer, Completion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx):\n \"\"\"Takes instance of xonsh.completer.Completer and dict with context.\"\"\"\n self.completer = completer\n self.ctx = ctx\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n\n # Only generate completions when the user hits tab.\n if complete_event.completion_requested:\n if self.completer is None:\n yield from []\n else:\n line = document.current_line.lstrip()\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0\n prefix = line[begidx:endidx]\n line = builtins.aliases.expand_alias(line)\n completions, l = self.completer.complete(prefix,\n line,\n begidx,\n endidx,\n self.ctx)\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n for comp in completions:\n yield Completion(comp, -l)\n\n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n window = cli.application.layout.children[0].content.children[1]\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n size = h + r\n\n def comp_height(cli):\n # If there is an autocompletion menu to be shown, make sure that o\n # layout has at least a minimal height in order to display it.\n if not cli.is_done:\n return LayoutDimension(min=size)\n else:\n return LayoutDimension()\n window._height = comp_height\n", "path": "xonsh/ptk/completer.py"}]}
num_tokens_prompt: 1,338
num_tokens_diff: 171
problem_id: gh_patches_debug_33734
source: rasdani/github-patches
task_type: git_diff
in_source_id: 3cn-ecn__nantralPlatform-484
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problème avec les liens vers les auteur.ic.es des suggestions Quand quelqu'un fait une suggestion depuis le site, le lien pour avoir le nom de la personne ne fonctionne pas. </issue> <code> [start of server/apps/home/forms.py] 1 from django import forms 2 3 class SuggestionForm(forms.Form): 4 title = forms.CharField(max_length=50, required=True) 5 description = forms.CharField(widget=forms.Textarea) 6 [end of server/apps/home/forms.py] [start of server/apps/utils/github.py] 1 import requests 2 from django.conf import settings 3 4 5 def create_issue(title: str, body: str): 6 issue = { 7 'title': title, 8 'body': body 9 } 10 resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues', 11 json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) 12 if resp.status_code != 201: 13 raise Exception(f'Error while posting issue to Github: {resp.reason}') 14 return resp.json()['number'] 15 16 17 def close_issue(number: int): 18 """Function to close an issue in the repo.""" 19 update = {'state': 'closed'} 20 requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}', 21 json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) 22 [end of server/apps/utils/github.py] [start of server/apps/home/views.py] 1 from datetime import * 2 from typing import List 3 from django.contrib.sites.shortcuts import get_current_site 4 from django.db.models.query import QuerySet 5 from django.shortcuts import render, redirect 6 from django.views.generic import TemplateView, FormView 7 from django.contrib import messages 8 from django.contrib.auth.mixins import LoginRequiredMixin 9 10 from apps.event.models import BaseEvent 11 from apps.post.models import Post 12 from apps.utils.github import create_issue 13 14 from .forms import SuggestionForm 15 16 17 class HomeView(LoginRequiredMixin, TemplateView): 18 template_name = 'home/home.html' 19 20 def get_context_data(self, **kwargs): 21 # Call the base implementation first to get a context 22 context = super().get_context_data(**kwargs) 23 posts: List[Post] = Post.objects.filter( 24 publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date') 25 context['posts'] = [ 26 post for post in posts if post.can_view(self.request.user)] 27 return context 28 29 30 class SuggestionView(LoginRequiredMixin, FormView): 31 template_name = 'home/suggestions.html' 32 form_class = SuggestionForm 33 34 def form_valid(self, form): 35 create_issue( 36 title=form.cleaned_data['title'], 37 body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>" 38 ) 39 messages.success( 40 self.request, 'Votre suggestion a été enregistrée merci') 41 return redirect('home:home') 42 43 44 def handler404(request, *args, **argv): 45 response = render(request, '404.html', context={}, status=404) 46 return response 47 48 49 def handler500(request, *args, **argv): 50 response = render(request, '500.html', context={}, 51 status=500) 52 return response 53 54 55 def event_sort(events, request): 56 tri = {} 57 jours = ["Lundi", "Mardi", "Mercredi", 58 "Jeudi", "Vendredi", "Samedi", "Dimanche"] 59 mois = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin", 60 "Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"] 61 for event in events: 62 if event.date.date() == date.today(): 63 if 
"Aujourd'hui" in tri: 64 tri["Aujourd'hui"].append( 65 (event, event.is_participating(request.user))) 66 else: 67 tri["Aujourd'hui"] = list() 68 tri["Aujourd'hui"].append( 69 (event, event.is_participating(request.user))) 70 elif event.date.date() == (date.today()+timedelta(days=1)): 71 if "Demain" in tri: 72 tri["Demain"].append( 73 (event, event.is_participating(request.user))) 74 else: 75 tri["Demain"] = list() 76 tri["Demain"].append( 77 (event, event.is_participating(request.user))) 78 else: 79 written_date = jours[event.date.weekday( 80 )] + " " + str(event.date.day) + " " + mois[event.date.month-1] 81 if written_date in tri: 82 tri[written_date].append( 83 (event, event.is_participating(request.user))) 84 else: 85 tri[written_date] = list() 86 tri[written_date].append( 87 (event, event.is_participating(request.user))) 88 return tri 89 [end of server/apps/home/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py --- a/server/apps/home/forms.py +++ b/server/apps/home/forms.py @@ -1,5 +1,14 @@ from django import forms +TYPE_CHOICES = ( + (1, ("Bug")), + (2, ("Suggestion")) +) + + class SuggestionForm(forms.Form): title = forms.CharField(max_length=50, required=True) description = forms.CharField(widget=forms.Textarea) + suggestionOrBug = forms.ChoiceField(label="Type", + choices=TYPE_CHOICES, + required=True) diff --git a/server/apps/home/views.py b/server/apps/home/views.py --- a/server/apps/home/views.py +++ b/server/apps/home/views.py @@ -34,7 +34,8 @@ def form_valid(self, form): create_issue( title=form.cleaned_data['title'], - body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>" + body=f"{form.cleaned_data['description']} <br/> [Clique pour découvrir qui propose ça.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})", + label=form.cleaned_data['suggestionOrBug'] ) messages.success( self.request, 'Votre suggestion a été enregistrée merci') diff --git a/server/apps/utils/github.py b/server/apps/utils/github.py --- a/server/apps/utils/github.py +++ b/server/apps/utils/github.py @@ -2,15 +2,18 @@ from django.conf import settings -def create_issue(title: str, body: str): +def create_issue(title: str, body: str, label): + label = "bug" if int(label) == 1 else "suggestion" issue = { 'title': title, - 'body': body + 'body': body, + 'labels': [label] } resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues', json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN)) if resp.status_code != 201: - raise Exception(f'Error while posting issue to Github: {resp.reason}') + raise Exception( + f'Error while posting issue to Github: {resp.reason}') return resp.json()['number']
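A side note on the root cause fixed in views.py above: the original f-string interpolated the bound method get_absolute_url itself rather than its return value, so the issue body carried a '<bound method ...>' repr instead of a URL (switching the HTML anchor to a Markdown link is secondary). A minimal, self-contained sketch of the pitfall; the Student class below is a stand-in, not the project's actual model:

class Student:
    def get_absolute_url(self):
        return "/student/42/"


student = Student()

# Interpolating the bound method itself yields its repr, not a URL:
print(f"http://example.org{student.get_absolute_url}")
# -> http://example.org<bound method Student.get_absolute_url of ...>

# Calling the method produces the intended link target:
print(f"http://example.org{student.get_absolute_url()}")
# -> http://example.org/student/42/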
{"golden_diff": "diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py\n--- a/server/apps/home/forms.py\n+++ b/server/apps/home/forms.py\n@@ -1,5 +1,14 @@\n from django import forms\n \n+TYPE_CHOICES = (\n+ (1, (\"Bug\")),\n+ (2, (\"Suggestion\"))\n+)\n+\n+\n class SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n+ suggestionOrBug = forms.ChoiceField(label=\"Type\",\n+ choices=TYPE_CHOICES,\n+ required=True)\ndiff --git a/server/apps/home/views.py b/server/apps/home/views.py\n--- a/server/apps/home/views.py\n+++ b/server/apps/home/views.py\n@@ -34,7 +34,8 @@\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n- body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n+ body=f\"{form.cleaned_data['description']} <br/> [Clique pour d\u00e9couvrir qui propose \u00e7a.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})\",\n+ label=form.cleaned_data['suggestionOrBug']\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\ndiff --git a/server/apps/utils/github.py b/server/apps/utils/github.py\n--- a/server/apps/utils/github.py\n+++ b/server/apps/utils/github.py\n@@ -2,15 +2,18 @@\n from django.conf import settings\n \n \n-def create_issue(title: str, body: str):\n+def create_issue(title: str, body: str, label):\n+ label = \"bug\" if int(label) == 1 else \"suggestion\"\n issue = {\n 'title': title,\n- 'body': body\n+ 'body': body,\n+ 'labels': [label]\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n- raise Exception(f'Error while posting issue to Github: {resp.reason}')\n+ raise Exception(\n+ f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n", "issue": "Probl\u00e8me avec les liens vers les auteur.ic.es des suggestions\nQuand quelqu'un fait une suggestion depuis le site, le lien pour avoir le nom de la personne ne fonctionne pas.\n", "before_files": [{"content": "from django import forms\n\nclass SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n", "path": "server/apps/home/forms.py"}, {"content": "import requests\nfrom django.conf import settings\n\n\ndef create_issue(title: str, body: str):\n issue = {\n 'title': title,\n 'body': body\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n raise Exception(f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n\n\ndef close_issue(number: int):\n \"\"\"Function to close an issue in the repo.\"\"\"\n update = {'state': 'closed'}\n requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',\n json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n", "path": "server/apps/utils/github.py"}, {"content": "from datetime import *\nfrom typing import List\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import 
TemplateView, FormView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom apps.event.models import BaseEvent\nfrom apps.post.models import Post\nfrom apps.utils.github import create_issue\n\nfrom .forms import SuggestionForm\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home/home.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super().get_context_data(**kwargs)\n posts: List[Post] = Post.objects.filter(\n publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')\n context['posts'] = [\n post for post in posts if post.can_view(self.request.user)]\n return context\n\n\nclass SuggestionView(LoginRequiredMixin, FormView):\n template_name = 'home/suggestions.html'\n form_class = SuggestionForm\n\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\n return redirect('home:home')\n\n\ndef handler404(request, *args, **argv):\n response = render(request, '404.html', context={}, status=404)\n return response\n\n\ndef handler500(request, *args, **argv):\n response = render(request, '500.html', context={},\n status=500)\n return response\n\n\ndef event_sort(events, request):\n tri = {}\n jours = [\"Lundi\", \"Mardi\", \"Mercredi\",\n \"Jeudi\", \"Vendredi\", \"Samedi\", \"Dimanche\"]\n mois = [\"Janvier\", \"F\u00e9vrier\", \"Mars\", \"Avril\", \"Mai\", \"Juin\",\n \"Juillet\", \"Ao\u00fbt\", \"Septembre\", \"Octobre\", \"Novembre\", \"D\u00e9cembre\"]\n for event in events:\n if event.date.date() == date.today():\n if \"Aujourd'hui\" in tri:\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Aujourd'hui\"] = list()\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n elif event.date.date() == (date.today()+timedelta(days=1)):\n if \"Demain\" in tri:\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Demain\"] = list()\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n written_date = jours[event.date.weekday(\n )] + \" \" + str(event.date.day) + \" \" + mois[event.date.month-1]\n if written_date in tri:\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n else:\n tri[written_date] = list()\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n return tri\n", "path": "server/apps/home/views.py"}]}
1,813
539
gh_patches_debug_17126
rasdani/github-patches
git_diff
rucio__rucio-5505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deprecation message in Paramiko Motivation ---------- Paramiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm. https://github.com/paramiko/paramiko/pull/2039 Modification ------------ The paramiko team is aware of the problem. They opened a PR to fix it. </issue> <code> [start of lib/rucio/common/extra.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2021 CERN 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 # 16 # Authors: 17 # - Benedikt Ziemons <[email protected]>, 2021 18 19 import importlib 20 21 22 def import_extras(module_list): 23 out = dict() 24 for mod in module_list: 25 out[mod] = None 26 try: 27 out[mod] = importlib.import_module(mod) 28 except ImportError: 29 pass 30 return out 31 [end of lib/rucio/common/extra.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py --- a/lib/rucio/common/extra.py +++ b/lib/rucio/common/extra.py @@ -17,6 +17,7 @@ # - Benedikt Ziemons <[email protected]>, 2021 import importlib +import warnings def import_extras(module_list): @@ -24,7 +25,12 @@ for mod in module_list: out[mod] = None try: - out[mod] = importlib.import_module(mod) + with warnings.catch_warnings(): + # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed + warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko') + # TODO: deprecated python 2 and 3.6 too ... + warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko') + out[mod] = importlib.import_module(mod) except ImportError: pass return out
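A detail worth calling out in the patch above: warnings.catch_warnings() saves and restores the global filter list on exit, so the suppression is scoped to the import instead of leaking into user code. A minimal sketch of the same pattern in isolation; the message pattern is illustrative and should be matched to the exact warning text your paramiko version emits:

import importlib
import warnings


def import_quietly(module_name: str):
    """Import a module while muting a known, upstream-acknowledged warning."""
    with warnings.catch_warnings():
        # Both patterns are regexes; 'module' is matched against the name of
        # the module that triggers the warning. Adjust them to the exact text
        # your paramiko/cryptography versions emit.
        warnings.filterwarnings(
            "ignore", message="Blowfish has been deprecated", module="paramiko"
        )
        return importlib.import_module(module_name)


paramiko = import_quietly("paramiko")  # hypothetical usage; needs paramiko installed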
{"golden_diff": "diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py\n--- a/lib/rucio/common/extra.py\n+++ b/lib/rucio/common/extra.py\n@@ -17,6 +17,7 @@\n # - Benedikt Ziemons <[email protected]>, 2021\n \n import importlib\n+import warnings\n \n \n def import_extras(module_list):\n@@ -24,7 +25,12 @@\n for mod in module_list:\n out[mod] = None\n try:\n- out[mod] = importlib.import_module(mod)\n+ with warnings.catch_warnings():\n+ # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed\n+ warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko')\n+ # TODO: deprecated python 2 and 3.6 too ...\n+ warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko')\n+ out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "issue": "Deprecation message in Paramiko\nMotivation\r\n----------\r\nParamiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm.\r\nhttps://github.com/paramiko/paramiko/pull/2039\r\n\r\nModification\r\n------------\r\nThe paramiko team is aware of the problem. They opened a PR to fix it.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport importlib\n\n\ndef import_extras(module_list):\n out = dict()\n for mod in module_list:\n out[mod] = None\n try:\n out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "path": "lib/rucio/common/extra.py"}]}
897
256
gh_patches_debug_14548
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
Add time units on the stats graphs
This improvement was requested by an author:

> It would also be nice to put the units next to the time, because right now I see 03:51 and I don't know whether that is min:sec or sec:mil
</issue>
<code>
[start of zds/utils/templatetags/seconds_to_duration.py]
1 from django import template
2 import datetime
3 
4 register = template.Library()
5 
6 
7 # TODO add unit test
8 @register.filter('seconds_to_duration')
9 def seconds_to_duration(value):
10     """
11     Display a human-readable reading-time (or any other duration)
12     from a duration in seconds.
13     """
14     if value <= 0:
15         return ''
16 
17     duration = datetime.timedelta(seconds=value)
18     return str(duration)
19 [end of zds/utils/templatetags/seconds_to_duration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py
--- a/zds/utils/templatetags/seconds_to_duration.py
+++ b/zds/utils/templatetags/seconds_to_duration.py
@@ -4,6 +4,14 @@
 register = template.Library()
 
 
+# https://stackoverflow.com/a/8907269/2226755
+def strfdelta(tdelta, fmt):
+    d = {'days': tdelta.days}
+    d['hours'], rem = divmod(tdelta.seconds, 3600)
+    d['minutes'], d['seconds'] = divmod(rem, 60)
+    return fmt.format(**d)
+
+
 # TODO add unit test
 @register.filter('seconds_to_duration')
 def seconds_to_duration(value):
@@ -15,4 +23,7 @@
         return ''
 
     duration = datetime.timedelta(seconds=value)
-    return str(duration)
+    if duration < datetime.timedelta(hours=1):
+        return strfdelta(duration, '{minutes}m{seconds}s')
+    else:
+        return strfdelta(duration, '{hours}h{minutes}m{seconds}s')
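To sanity-check the divmod decomposition used by strfdelta above, here is the helper run standalone, including the ambiguous "03:51" value from the issue report (expected output shown as comments):

import datetime


def strfdelta(tdelta: datetime.timedelta, fmt: str) -> str:
    d = {"days": tdelta.days}
    d["hours"], rem = divmod(tdelta.seconds, 3600)
    d["minutes"], d["seconds"] = divmod(rem, 60)
    return fmt.format(**d)


print(strfdelta(datetime.timedelta(seconds=231), "{minutes}m{seconds}s"))
# -> 3m51s  (the ambiguous "03:51" from the issue, now unambiguous)
print(strfdelta(datetime.timedelta(seconds=4500), "{hours}h{minutes}m{seconds}s"))
# -> 1h15m0s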
{"golden_diff": "diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py\n--- a/zds/utils/templatetags/seconds_to_duration.py\n+++ b/zds/utils/templatetags/seconds_to_duration.py\n@@ -4,6 +4,14 @@\n register = template.Library()\n \n \n+# https://stackoverflow.com/a/8907269/2226755\n+def strfdelta(tdelta, fmt):\n+ d = {'days': tdelta.days}\n+ d['hours'], rem = divmod(tdelta.seconds, 3600)\n+ d['minutes'], d['seconds'] = divmod(rem, 60)\n+ return fmt.format(**d)\n+\n+\n # TODO add unit test\n @register.filter('seconds_to_duration')\n def seconds_to_duration(value):\n@@ -15,4 +23,7 @@\n return ''\n \n duration = datetime.timedelta(seconds=value)\n- return str(duration)\n+ if duration < 3600\n+ return strfdelta(duration, '{minutes}m{seconds}s')\n+ else\n+ return strfdelta(duration, '{hours}h{minutes}m{seconds}s')\n", "issue": "Ajouter les unit\u00e9s temporelles sur les graphs des stats\nCete am\u00e9lioration est demand\u00e9e par un auteur : \r\n\r\n> Ce serait bien, aussi, de mettre les unit\u00e9s \u00e0 c\u00f4t\u00e9 du temps. Parce que l\u00e0, j\u2019ai 03:51, mais je ne sais pas si c\u2019est min:sec ou sec:mil\n", "before_files": [{"content": "from django import template\nimport datetime\n\nregister = template.Library()\n\n\n# TODO add unit test\[email protected]('seconds_to_duration')\ndef seconds_to_duration(value):\n \"\"\"\n Display a human-readable reading-time (or any other duration)\n from a duration in seconds.\n \"\"\"\n if value <= 0:\n return ''\n\n duration = datetime.timedelta(seconds=value)\n return str(duration)\n", "path": "zds/utils/templatetags/seconds_to_duration.py"}]}
742
277
gh_patches_debug_1553
rasdani/github-patches
git_diff
feast-dev__feast-3756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value Get `OperationalError: connection to server at "localhost" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when run `feast apply`. Because of `keepalives_idle=config.keepalives_idle` field in function '_get_conn' in `infra/utils/postgres/connection_utils.py` file. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml and pass 'keepalives_idle=0' by default setting in `infra/utils/postgres/postgres_config.py`. - Version: 0.33.1 - Platform: linux ubuntu 20.04 - Subsystem: ## Possible Solution Check this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default. </issue> <code> [start of sdk/python/feast/infra/utils/postgres/postgres_config.py] 1 from enum import Enum 2 from typing import Optional 3 4 from pydantic import StrictStr 5 6 from feast.repo_config import FeastConfigBaseModel 7 8 9 class ConnectionType(Enum): 10 singleton = "singleton" 11 pool = "pool" 12 13 14 class PostgreSQLConfig(FeastConfigBaseModel): 15 min_conn: int = 1 16 max_conn: int = 10 17 conn_type: ConnectionType = ConnectionType.singleton 18 host: StrictStr 19 port: int = 5432 20 database: StrictStr 21 db_schema: StrictStr = "public" 22 user: StrictStr 23 password: StrictStr 24 sslmode: Optional[StrictStr] = None 25 sslkey_path: Optional[StrictStr] = None 26 sslcert_path: Optional[StrictStr] = None 27 sslrootcert_path: Optional[StrictStr] = None 28 keepalives_idle: int = 0 29 [end of sdk/python/feast/infra/utils/postgres/postgres_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py --- a/sdk/python/feast/infra/utils/postgres/postgres_config.py +++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py @@ -25,4 +25,4 @@ sslkey_path: Optional[StrictStr] = None sslcert_path: Optional[StrictStr] = None sslrootcert_path: Optional[StrictStr] = None - keepalives_idle: int = 0 + keepalives_idle: Optional[int] = None
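The change above only flips the default to None; to take effect, the connection helper must omit the option entirely when it is unset. A sketch of that pattern, assuming a psycopg2-style connect() (Feast's actual _get_conn differs in detail):

import psycopg2  # assumption: a libpq-based driver that forwards keepalive options


def get_conn(config):
    kwargs = dict(
        host=config.host,
        port=config.port,
        dbname=config.database,
        user=config.user,
        password=config.password,
    )
    # Forward keepalives_idle only when explicitly configured. Passing 0 has
    # been reported to make libpq call setsockopt(TCP_KEEPIDLE) with a value
    # some platforms reject, which is exactly the error in this issue.
    if config.keepalives_idle is not None:
        kwargs["keepalives_idle"] = config.keepalives_idle
    return psycopg2.connect(**kwargs)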
{"golden_diff": "diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n--- a/sdk/python/feast/infra/utils/postgres/postgres_config.py\n+++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n@@ -25,4 +25,4 @@\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n- keepalives_idle: int = 0\n+ keepalives_idle: Optional[int] = None\n", "issue": "Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value\nGet `OperationalError: connection to server at \"localhost\" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when run `feast apply`.\r\nBecause of `keepalives_idle=config.keepalives_idle` field in function '_get_conn' in `infra/utils/postgres/connection_utils.py` file. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml and pass 'keepalives_idle=0' by default setting in `infra/utils/postgres/postgres_config.py`. \r\n\r\n- Version: 0.33.1\r\n- Platform: linux ubuntu 20.04\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nCheck this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default.\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nfrom pydantic import StrictStr\n\nfrom feast.repo_config import FeastConfigBaseModel\n\n\nclass ConnectionType(Enum):\n singleton = \"singleton\"\n pool = \"pool\"\n\n\nclass PostgreSQLConfig(FeastConfigBaseModel):\n min_conn: int = 1\n max_conn: int = 10\n conn_type: ConnectionType = ConnectionType.singleton\n host: StrictStr\n port: int = 5432\n database: StrictStr\n db_schema: StrictStr = \"public\"\n user: StrictStr\n password: StrictStr\n sslmode: Optional[StrictStr] = None\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n keepalives_idle: int = 0\n", "path": "sdk/python/feast/infra/utils/postgres/postgres_config.py"}]}
1,036
146
gh_patches_debug_18869
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2407
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> We should be able to create new admin users, upgrade existing users to admin ## Description * `is_superuser` is currently a readonly property in Users APIs. * We should be able to set them while an admin is editing a user. * Users (including super-users) should not be able to modify it's value for themselves. </issue> <code> [start of mathesar/api/ui/serializers/users.py] 1 from django.contrib.auth.password_validation import validate_password 2 from rest_access_policy import FieldAccessMixin, PermittedPkRelatedField 3 from rest_framework import serializers 4 5 from mathesar.api.db.permissions.database import DatabaseAccessPolicy 6 from mathesar.api.db.permissions.schema import SchemaAccessPolicy 7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin 8 from mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword 9 from mathesar.api.ui.permissions.users import UserAccessPolicy 10 from mathesar.models.base import Database, Schema 11 from mathesar.models.users import User, DatabaseRole, SchemaRole 12 13 14 class NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 15 class Meta: 16 model = DatabaseRole 17 fields = ['id', 'database', 'role'] 18 19 20 class NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 21 class Meta: 22 model = SchemaRole 23 fields = ['id', 'schema', 'role'] 24 25 26 class UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer): 27 database_roles = NestedDatabaseRoleSerializer(many=True, required=False) 28 schema_roles = NestedSchemaRoleSerializer(many=True, required=False) 29 access_policy = UserAccessPolicy 30 31 class Meta: 32 model = User 33 fields = [ 34 'id', 35 'full_name', 36 'short_name', 37 'username', 38 'password', 39 'email', 40 'is_superuser', 41 'database_roles', 42 'schema_roles', 43 ] 44 extra_kwargs = { 45 'password': {'write_only': True}, 46 'is_superuser': {'read_only': True}, 47 'database_roles': {'read_only': True}, 48 'schema_roles': {'read_only': True} 49 } 50 51 def create(self, validated_data): 52 password = validated_data.pop('password') 53 user = User(**validated_data) 54 user.password_change_needed = True 55 user.set_password(password) 56 user.save() 57 return user 58 59 60 class ChangePasswordSerializer(serializers.Serializer): 61 password = serializers.CharField(write_only=True, required=True, validators=[validate_password]) 62 old_password = serializers.CharField(write_only=True, required=True) 63 64 def validate_old_password(self, value): 65 user = self.context['request'].user 66 if user.check_password(value) is True: 67 return value 68 raise IncorrectOldPassword(field='old_password') 69 70 def update(self, instance, validated_data): 71 instance.set_password(validated_data['password']) 72 instance.save() 73 return instance 74 75 76 class PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer): 77 password = serializers.CharField(write_only=True, required=True, validators=[validate_password]) 78 79 80 class DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 81 class Meta: 82 model = DatabaseRole 83 fields = ['id', 'user', 'database', 'role'] 84 85 # Restrict the list of databases to which the user has access to create a database role 86 # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField` 
87 database = PermittedPkRelatedField( 88 access_policy=DatabaseAccessPolicy, 89 queryset=Database.current_objects.all() 90 ) 91 92 93 class SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer): 94 class Meta: 95 model = SchemaRole 96 fields = ['id', 'user', 'schema', 'role'] 97 98 schema = PermittedPkRelatedField( 99 access_policy=SchemaAccessPolicy, 100 queryset=Schema.current_objects.all() 101 ) 102 [end of mathesar/api/ui/serializers/users.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py --- a/mathesar/api/ui/serializers/users.py +++ b/mathesar/api/ui/serializers/users.py @@ -43,11 +43,23 @@ ] extra_kwargs = { 'password': {'write_only': True}, - 'is_superuser': {'read_only': True}, 'database_roles': {'read_only': True}, 'schema_roles': {'read_only': True} } + def get_fields(self): + fields = super().get_fields() + request = self.context.get("request", None) + if not hasattr(request, 'parser_context'): + return fields + kwargs = request.parser_context.get('kwargs') + if kwargs: + user_pk = kwargs.get('pk') + if user_pk: + if request.user.id == int(user_pk) or not request.user.is_superuser: + fields["is_superuser"].read_only = True + return fields + def create(self, validated_data): password = validated_data.pop('password') user = User(**validated_data)
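The mechanism the patch relies on is DRF's get_fields() hook, which lets a serializer adjust field attributes per request. A condensed, self-contained sketch of the same rule (admins may write is_superuser, but never on their own record), assuming the request is present in the serializer context as DRF's generic views arrange:

from django.contrib.auth import get_user_model
from rest_framework import serializers


class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = get_user_model()
        fields = ["id", "username", "is_superuser"]

    def get_fields(self):
        fields = super().get_fields()
        request = self.context.get("request")
        if request is None or not hasattr(request, "parser_context"):
            return fields
        user_pk = (request.parser_context.get("kwargs") or {}).get("pk")
        editing_self = user_pk is not None and request.user.id == int(user_pk)
        # Only a superuser editing someone *else* may change the flag.
        if editing_self or not request.user.is_superuser:
            fields["is_superuser"].read_only = True
        return fields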
{"golden_diff": "diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py\n--- a/mathesar/api/ui/serializers/users.py\n+++ b/mathesar/api/ui/serializers/users.py\n@@ -43,11 +43,23 @@\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n- 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n \n+ def get_fields(self):\n+ fields = super().get_fields()\n+ request = self.context.get(\"request\", None)\n+ if not hasattr(request, 'parser_context'):\n+ return fields\n+ kwargs = request.parser_context.get('kwargs')\n+ if kwargs:\n+ user_pk = kwargs.get('pk')\n+ if user_pk:\n+ if request.user.id == int(user_pk) or not request.user.is_superuser:\n+ fields[\"is_superuser\"].read_only = True\n+ return fields\n+\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n", "issue": "We should be able to create new admin users, upgrade existing users to admin\n## Description\r\n* `is_superuser` is currently a readonly property in Users APIs.\r\n* We should be able to set them while an admin is editing a user.\r\n* Users (including super-users) should not be able to modify it's value for themselves.\r\n\n", "before_files": [{"content": "from django.contrib.auth.password_validation import validate_password\nfrom rest_access_policy import FieldAccessMixin, PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.base import Database, Schema\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'database', 'role']\n\n\nclass NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'schema', 'role']\n\n\nclass UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer):\n database_roles = NestedDatabaseRoleSerializer(many=True, required=False)\n schema_roles = NestedSchemaRoleSerializer(many=True, required=False)\n access_policy = UserAccessPolicy\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'full_name',\n 'short_name',\n 'username',\n 'password',\n 'email',\n 'is_superuser',\n 'database_roles',\n 'schema_roles',\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n user.password_change_needed = True\n user.set_password(password)\n user.save()\n return user\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n old_password = serializers.CharField(write_only=True, required=True)\n\n def validate_old_password(self, value):\n user = self.context['request'].user\n if user.check_password(value) is True:\n return value\n raise 
IncorrectOldPassword(field='old_password')\n\n def update(self, instance, validated_data):\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\n\nclass PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n\n\nclass DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'user', 'database', 'role']\n\n # Restrict the list of databases to which the user has access to create a database role\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField`\n database = PermittedPkRelatedField(\n access_policy=DatabaseAccessPolicy,\n queryset=Database.current_objects.all()\n )\n\n\nclass SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'user', 'schema', 'role']\n\n schema = PermittedPkRelatedField(\n access_policy=SchemaAccessPolicy,\n queryset=Schema.current_objects.all()\n )\n", "path": "mathesar/api/ui/serializers/users.py"}]}
1,542
254
gh_patches_debug_22200
rasdani/github-patches
git_diff
mozilla__bugbug-1722
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Submit logged warnings to Sentry Currently, we only report exceptions to Sentry. It'd be nice to report warnings too, so we can get a sense of how often they happen. IIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of automatically. </issue> <code> [start of http_service/bugbug_http/worker.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # This Source Code Form is subject to the terms of the Mozilla Public 4 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 # You can obtain one at http://mozilla.org/MPL/2.0/. 6 7 import os 8 import sys 9 10 import sentry_sdk 11 from redis import Redis 12 from rq import Connection, Worker 13 from sentry_sdk.integrations.rq import RqIntegration 14 15 import bugbug_http.boot 16 from bugbug import get_bugbug_version 17 18 if os.environ.get("SENTRY_DSN"): 19 sentry_sdk.init( 20 os.environ.get("SENTRY_DSN"), 21 integrations=[RqIntegration()], 22 release=get_bugbug_version(), 23 ) 24 25 26 def main(): 27 # Bootstrap the worker assets 28 bugbug_http.boot.boot_worker() 29 30 # Provide queue names to listen to as arguments to this script, 31 # similar to rq worker 32 redis_url = os.environ.get("REDIS_URL", "redis://localhost/0") 33 redis_conn = Redis.from_url(redis_url) 34 with Connection(connection=redis_conn): 35 qs = sys.argv[1:] or ["default"] 36 37 w = Worker(qs) 38 w.work() 39 40 41 if __name__ == "__main__": 42 main() 43 [end of http_service/bugbug_http/worker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py --- a/http_service/bugbug_http/worker.py +++ b/http_service/bugbug_http/worker.py @@ -4,21 +4,29 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. +import logging import os import sys import sentry_sdk from redis import Redis from rq import Connection, Worker +from sentry_sdk.integrations.logging import LoggingIntegration from sentry_sdk.integrations.rq import RqIntegration import bugbug_http.boot from bugbug import get_bugbug_version if os.environ.get("SENTRY_DSN"): + logging_integration = LoggingIntegration( + # Default behaviour: INFO messages will be included as breadcrumbs + level=logging.INFO, + # Change default behaviour (ERROR messages events) + event_level=logging.WARNING, + ) sentry_sdk.init( - os.environ.get("SENTRY_DSN"), - integrations=[RqIntegration()], + dsn=os.environ.get("SENTRY_DSN"), + integrations=[RqIntegration(), logging_integration], release=get_bugbug_version(), )
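For reference, LoggingIntegration is part of the public sentry_sdk API, and the two thresholds above are independent: level controls which log records become breadcrumbs, while event_level controls which become standalone Sentry events. A minimal runnable sketch (the DSN is a placeholder, not a real project):

import logging

import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration

sentry_sdk.init(
    dsn="https://[email protected]/0",  # placeholder DSN
    integrations=[
        LoggingIntegration(
            level=logging.INFO,           # INFO and up recorded as breadcrumbs
            event_level=logging.WARNING,  # WARNING and up reported as events
        )
    ],
)

logging.getLogger(__name__).warning("this warning now reaches Sentry")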
{"golden_diff": "diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py\n--- a/http_service/bugbug_http/worker.py\n+++ b/http_service/bugbug_http/worker.py\n@@ -4,21 +4,29 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this file,\n # You can obtain one at http://mozilla.org/MPL/2.0/.\n \n+import logging\n import os\n import sys\n \n import sentry_sdk\n from redis import Redis\n from rq import Connection, Worker\n+from sentry_sdk.integrations.logging import LoggingIntegration\n from sentry_sdk.integrations.rq import RqIntegration\n \n import bugbug_http.boot\n from bugbug import get_bugbug_version\n \n if os.environ.get(\"SENTRY_DSN\"):\n+ logging_integration = LoggingIntegration(\n+ # Default behaviour: INFO messages will be included as breadcrumbs\n+ level=logging.INFO,\n+ # Change default behaviour (ERROR messages events)\n+ event_level=logging.WARNING,\n+ )\n sentry_sdk.init(\n- os.environ.get(\"SENTRY_DSN\"),\n- integrations=[RqIntegration()],\n+ dsn=os.environ.get(\"SENTRY_DSN\"),\n+ integrations=[RqIntegration(), logging_integration],\n release=get_bugbug_version(),\n )\n", "issue": "Submit logged warnings to Sentry\nCurrently, we only report exceptions to Sentry.\r\nIt'd be nice to report warnings too, so we can get a sense of how often they happen.\r\nIIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of automatically.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\n\nimport sentry_sdk\nfrom redis import Redis\nfrom rq import Connection, Worker\nfrom sentry_sdk.integrations.rq import RqIntegration\n\nimport bugbug_http.boot\nfrom bugbug import get_bugbug_version\n\nif os.environ.get(\"SENTRY_DSN\"):\n sentry_sdk.init(\n os.environ.get(\"SENTRY_DSN\"),\n integrations=[RqIntegration()],\n release=get_bugbug_version(),\n )\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}]}
965
289
gh_patches_debug_31438
rasdani/github-patches
git_diff
pyodide__pyodide-2507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pyodide_build buildpkg does not install Cython as a build dependency when it is spell with a lower case c ## 🐛 Bug When trying to build [cftime](https://github.com/Unidata/cftime) the isolated env does not install cython. ### To Reproduce `python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a) ### Expected behavior Successful build :smile: ### Environment - Pyodide Version<!-- (e.g. 1.8.1) -->: - Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: - Any other relevant information: ### Additional context A patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it but we should not be case sensitive with PyPI names. xref.: https://github.com/pyodide/pyodide/pull/2504 </issue> <code> [start of pyodide-build/pyodide_build/pypabuild.py] 1 import contextlib 2 import os 3 import sys 4 import traceback 5 from itertools import chain 6 from pathlib import Path 7 from typing import Mapping 8 9 from build import BuildBackendException, ProjectBuilder # type: ignore[import] 10 from build.__main__ import ( # type: ignore[import] 11 _STYLES, 12 _error, 13 _handle_build_error, 14 _IsolatedEnvBuilder, 15 _ProjectBuilder, 16 ) 17 from build.env import IsolatedEnv # type: ignore[import] 18 from packaging.requirements import Requirement 19 20 from .common import get_hostsitepackages, get_pyversion 21 22 UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"] 23 24 25 def symlink_unisolated_packages(env: IsolatedEnv): 26 pyversion = get_pyversion() 27 site_packages_path = f"lib/{pyversion}/site-packages" 28 env_site_packages = Path(env._path) / site_packages_path 29 host_site_packages = Path(get_hostsitepackages()) 30 for name in UNISOLATED_PACKAGES: 31 for path in chain( 32 host_site_packages.glob(f"{name}*"), host_site_packages.glob(f"_{name}*") 33 ): 34 (env_site_packages / path.name).unlink(missing_ok=True) 35 (env_site_packages / path.name).symlink_to(path) 36 37 38 def remove_unisolated_requirements(requires: set[str]) -> set[str]: 39 for reqstr in list(requires): 40 req = Requirement(reqstr) 41 for avoid_name in UNISOLATED_PACKAGES: 42 if avoid_name in req.name: 43 requires.remove(reqstr) 44 return requires 45 46 47 @contextlib.contextmanager 48 def replace_env(build_env: Mapping[str, str]): 49 old_environ = dict(os.environ) 50 os.environ.clear() 51 os.environ.update(build_env) 52 try: 53 yield 54 finally: 55 os.environ.clear() 56 os.environ.update(old_environ) 57 58 59 def install_reqs(env: IsolatedEnv, reqs: set[str]): 60 env.install(remove_unisolated_requirements(reqs)) 61 62 63 def _build_in_isolated_env( 64 build_env: Mapping[str, str], 65 builder: ProjectBuilder, 66 outdir: str, 67 distribution: str, 68 ) -> str: 69 with _IsolatedEnvBuilder() as env: 70 builder.python_executable = env.executable 71 builder.scripts_dir = env.scripts_dir 72 # first install the build dependencies 73 symlink_unisolated_packages(env) 74 install_reqs(env, builder.build_system_requires) 75 installed_requires_for_build = False 76 try: 77 build_reqs = builder.get_requires_for_build(distribution) 78 except BuildBackendException: 79 pass 80 else: 81 install_reqs(env, build_reqs) 82 installed_requires_for_build = True 83 84 with replace_env(build_env): 85 if not installed_requires_for_build: 86 install_reqs(env, builder.get_requires_for_build(distribution)) 87 return 
builder.build(distribution, outdir, {}) 88 89 90 def build(build_env: Mapping[str, str]): 91 srcdir = Path.cwd() 92 outdir = srcdir / "dist" 93 builder = _ProjectBuilder(srcdir) 94 distribution = "wheel" 95 try: 96 with _handle_build_error(): 97 built = _build_in_isolated_env( 98 build_env, builder, str(outdir), distribution 99 ) 100 print("{bold}{green}Successfully built {}{reset}".format(built, **_STYLES)) 101 except Exception as e: # pragma: no cover 102 tb = traceback.format_exc().strip("\n") 103 print("\n{dim}{}{reset}\n".format(tb, **_STYLES)) 104 _error(str(e)) 105 sys.exit(1) 106 [end of pyodide-build/pyodide_build/pypabuild.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py --- a/pyodide-build/pyodide_build/pypabuild.py +++ b/pyodide-build/pyodide_build/pypabuild.py @@ -19,7 +19,7 @@ from .common import get_hostsitepackages, get_pyversion -UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"] +UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran"] def symlink_unisolated_packages(env: IsolatedEnv): @@ -39,7 +39,7 @@ for reqstr in list(requires): req = Requirement(reqstr) for avoid_name in UNISOLATED_PACKAGES: - if avoid_name in req.name: + if avoid_name in req.name.lower(): requires.remove(reqstr) return requires @@ -58,6 +58,11 @@ def install_reqs(env: IsolatedEnv, reqs: set[str]): env.install(remove_unisolated_requirements(reqs)) + # Some packages (numcodecs) don't declare cython as a build dependency and + # only recythonize if it is present. We need them to always recythonize so + # we always install cython. If the reqs included some cython version already + # then this won't do anything. + env.install(["cython"]) def _build_in_isolated_env( @@ -66,6 +71,10 @@ outdir: str, distribution: str, ) -> str: + # For debugging: The following line disables removal of the isolated venv. + # It will be left in the /tmp folder and can be inspected or entered as + # needed. + # _IsolatedEnvBuilder.__exit__ = lambda *args: None with _IsolatedEnvBuilder() as env: builder.python_executable = env.executable builder.scripts_dir = env.scripts_dir
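The .lower() comparison above is enough for this fixed package list, but PEP 503 name normalization also treats '-', '_' and '.' as equivalent. A sketch of a stricter variant using packaging.utils.canonicalize_name, shown as an alternative rather than what the patch itself does; note it also switches from substring to exact matching, which avoids false positives such as 'scipy-stubs':

from packaging.requirements import Requirement
from packaging.utils import canonicalize_name

UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran"]
_UNISOLATED = {canonicalize_name(name) for name in UNISOLATED_PACKAGES}


def is_unisolated(reqstr: str) -> bool:
    # "Cython", "cython" and "CYTHON" all canonicalize to "cython".
    return canonicalize_name(Requirement(reqstr).name) in _UNISOLATED


print(is_unisolated("Cython>=0.29"))  # False: cython is now installed separately
print(is_unisolated("NumPy==1.22"))   # True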
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py\n--- a/pyodide-build/pyodide_build/pypabuild.py\n+++ b/pyodide-build/pyodide_build/pypabuild.py\n@@ -19,7 +19,7 @@\n \n from .common import get_hostsitepackages, get_pyversion\n \n-UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n+UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\"]\n \n \n def symlink_unisolated_packages(env: IsolatedEnv):\n@@ -39,7 +39,7 @@\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n- if avoid_name in req.name:\n+ if avoid_name in req.name.lower():\n requires.remove(reqstr)\n return requires\n \n@@ -58,6 +58,11 @@\n \n def install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n+ # Some packages (numcodecs) don't declare cython as a build dependency and\n+ # only recythonize if it is present. We need them to always recythonize so\n+ # we always install cython. If the reqs included some cython version already\n+ # then this won't do anything.\n+ env.install([\"cython\"])\n \n \n def _build_in_isolated_env(\n@@ -66,6 +71,10 @@\n outdir: str,\n distribution: str,\n ) -> str:\n+ # For debugging: The following line disables removal of the isolated venv.\n+ # It will be left in the /tmp folder and can be inspected or entered as\n+ # needed.\n+ # _IsolatedEnvBuilder.__exit__ = lambda *args: None\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n", "issue": "pyodide_build buildpkg does not install Cython as a build dependency when it is spell with a lower case c\n## \ud83d\udc1b Bug\r\n\r\nWhen trying to build [cftime](https://github.com/Unidata/cftime) the isolated env does not install cython.\r\n\r\n### To Reproduce\r\n\r\n`python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a)\r\n\r\n### Expected behavior\r\n\r\nSuccessful build :smile: \r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->:\r\n- Browser version<!-- (e.g. 
Chrome 95.0.4638.54) -->:\r\n- Any other relevant information:\r\n\r\n\r\n### Additional context\r\n\r\nA patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it but we should not be case sensitive with PyPI names.\r\n\r\nxref.: https://github.com/pyodide/pyodide/pull/2504\n", "before_files": [{"content": "import contextlib\nimport os\nimport sys\nimport traceback\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Mapping\n\nfrom build import BuildBackendException, ProjectBuilder # type: ignore[import]\nfrom build.__main__ import ( # type: ignore[import]\n _STYLES,\n _error,\n _handle_build_error,\n _IsolatedEnvBuilder,\n _ProjectBuilder,\n)\nfrom build.env import IsolatedEnv # type: ignore[import]\nfrom packaging.requirements import Requirement\n\nfrom .common import get_hostsitepackages, get_pyversion\n\nUNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n\n\ndef symlink_unisolated_packages(env: IsolatedEnv):\n pyversion = get_pyversion()\n site_packages_path = f\"lib/{pyversion}/site-packages\"\n env_site_packages = Path(env._path) / site_packages_path\n host_site_packages = Path(get_hostsitepackages())\n for name in UNISOLATED_PACKAGES:\n for path in chain(\n host_site_packages.glob(f\"{name}*\"), host_site_packages.glob(f\"_{name}*\")\n ):\n (env_site_packages / path.name).unlink(missing_ok=True)\n (env_site_packages / path.name).symlink_to(path)\n\n\ndef remove_unisolated_requirements(requires: set[str]) -> set[str]:\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n if avoid_name in req.name:\n requires.remove(reqstr)\n return requires\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]):\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n\n\ndef _build_in_isolated_env(\n build_env: Mapping[str, str],\n builder: ProjectBuilder,\n outdir: str,\n distribution: str,\n) -> str:\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n # first install the build dependencies\n symlink_unisolated_packages(env)\n install_reqs(env, builder.build_system_requires)\n installed_requires_for_build = False\n try:\n build_reqs = builder.get_requires_for_build(distribution)\n except BuildBackendException:\n pass\n else:\n install_reqs(env, build_reqs)\n installed_requires_for_build = True\n\n with replace_env(build_env):\n if not installed_requires_for_build:\n install_reqs(env, builder.get_requires_for_build(distribution))\n return builder.build(distribution, outdir, {})\n\n\ndef build(build_env: Mapping[str, str]):\n srcdir = Path.cwd()\n outdir = srcdir / \"dist\"\n builder = _ProjectBuilder(srcdir)\n distribution = \"wheel\"\n try:\n with _handle_build_error():\n built = _build_in_isolated_env(\n build_env, builder, str(outdir), distribution\n )\n print(\"{bold}{green}Successfully built {}{reset}\".format(built, **_STYLES))\n except Exception as e: # pragma: no cover\n tb = traceback.format_exc().strip(\"\\n\")\n print(\"\\n{dim}{}{reset}\\n\".format(tb, **_STYLES))\n _error(str(e))\n sys.exit(1)\n", "path": "pyodide-build/pyodide_build/pypabuild.py"}]}
1,806
486
gh_patches_debug_3703
rasdani/github-patches
git_diff
wright-group__WrightTools-359
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> coverage consider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/) </issue> <code> [start of setup.py] 1 #! /usr/bin/env python3 2 3 import os 4 from setuptools import setup, find_packages 5 6 7 def package_files(directory): 8 paths = [] 9 for (path, directories, filenames) in os.walk(directory): 10 for filename in filenames: 11 paths.append(os.path.join('..', path, filename)) 12 return paths 13 14 15 here = os.path.abspath(os.path.dirname(__file__)) 16 17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets')) 18 extra_files.append(os.path.join(here, 'CONTRIBUTORS')) 19 extra_files.append(os.path.join(here, 'LICENSE')) 20 extra_files.append(os.path.join(here, 'README.rst')) 21 extra_files.append(os.path.join(here, 'requirements.txt')) 22 extra_files.append(os.path.join(here, 'VERSION')) 23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json')) 24 25 with open(os.path.join(here, 'requirements.txt')) as f: 26 required = f.read().splitlines() 27 28 with open(os.path.join(here, 'VERSION')) as version_file: 29 version = version_file.read().strip() 30 31 setup( 32 name='WrightTools', 33 packages=find_packages(), 34 package_data={'': extra_files}, 35 setup_requires=['pytest-runner'], 36 tests_require=['pytest'], 37 install_requires=required, 38 extras_require={'docs': ['sphinx-gallery>=0.1.9']}, 39 version=version, 40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.', 41 author='Blaise Thompson', 42 author_email='[email protected]', 43 license='MIT', 44 url='http://wright.tools', 45 keywords='spectroscopy science multidimensional visualization', 46 classifiers=['Development Status :: 5 - Production/Stable', 47 'Intended Audience :: Science/Research', 48 'License :: OSI Approved :: MIT License', 49 'Natural Language :: English', 50 'Programming Language :: Python :: 2', 51 'Programming Language :: Python :: 2.7', 52 'Programming Language :: Python :: 3', 53 'Programming Language :: Python :: 3.3', 54 'Programming Language :: Python :: 3.4', 55 'Programming Language :: Python :: 3.5', 56 'Topic :: Scientific/Engineering'] 57 ) 58 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ packages=find_packages(), package_data={'': extra_files}, setup_requires=['pytest-runner'], - tests_require=['pytest'], + tests_require=['pytest', 'pytest-cov'], install_requires=required, extras_require={'docs': ['sphinx-gallery>=0.1.9']}, version=version,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n- tests_require=['pytest'],\n+ tests_require=['pytest', 'pytest-cov'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n", "issue": "coverage\nconsider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/)\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]}
1,140
100
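The one-line diff above only declares `pytest-cov` as a test dependency; actually collecting coverage is left to the runner. A sketch of one way to invoke it — the `--cov`/`--cov-report` flags are standard `pytest-cov` options, while the `WrightTools`/`tests/` targets are assumptions rather than values taken from the repository's CI configuration.

```python
import sys

import pytest

# Equivalent to: pytest --cov=WrightTools --cov-report=term-missing tests/
sys.exit(pytest.main(["--cov=WrightTools", "--cov-report=term-missing", "tests/"]))
```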
gh_patches_debug_7544
rasdani/github-patches
git_diff
liqd__a4-product-375
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [partner landing page] unpublished and archived projects are shown On the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden per default. See: https://product-dev.liqd.net/teststadt/ ![bildschirmfoto 2018-05-28 um 11 33 51](https://user-images.githubusercontent.com/15341015/40608238-2254983e-626b-11e8-8429-588c014f7a82.png) </issue> <code> [start of liqd_product/apps/partners/views.py] 1 from django.contrib.messages.views import SuccessMessageMixin 2 from django.utils.translation import ugettext_lazy as _ 3 from django.views import generic 4 from django.views.generic import DetailView 5 6 from adhocracy4.actions.models import Action 7 from adhocracy4.projects.models import Project 8 from adhocracy4.rules import mixins as rules_mixins 9 from liqd_product.apps.partners.models import Partner 10 11 from . import forms 12 13 14 class PartnerView(DetailView): 15 template_name = 'partner_landing_page.html' 16 model = Partner 17 slug_url_kwarg = 'partner_slug' 18 19 def get_context_data(self, **kwargs): 20 context = super().get_context_data(**kwargs) 21 22 context['project_list'] = Project.objects\ 23 .filter(organisation__partner=self.object) 24 25 context['action_list'] = Action.objects\ 26 .filter(project__organisation__partner=self.object)\ 27 .filter_public()\ 28 .exclude_updates()[:4] 29 30 context['stats'] = { 31 'users': 1204, 32 'items': 3425, 33 'comments': 23234, 34 'ratings': 134234, 35 } 36 37 return context 38 39 40 class InformationView(DetailView): 41 template_name = 'partner_information.html' 42 model = Partner 43 slug_url_kwarg = 'partner_slug' 44 45 46 class ImprintView(DetailView): 47 template_name = 'partner_imprint.html' 48 model = Partner 49 slug_url_kwarg = 'partner_slug' 50 51 52 class PartnerUpdateView(rules_mixins.PermissionRequiredMixin, 53 SuccessMessageMixin, 54 generic.UpdateView): 55 model = Partner 56 form_class = forms.PartnerForm 57 slug_url_kwarg = 'partner_slug' 58 template_name = 'partner_form.html' 59 success_message = _('Municipality successfully updated.') 60 permission_required = 'liqd_product_partners.change_partner' 61 menu_item = 'partner' 62 63 def get_success_url(self): 64 return self.request.path 65 [end of liqd_product/apps/partners/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py --- a/liqd_product/apps/partners/views.py +++ b/liqd_product/apps/partners/views.py @@ -20,7 +20,9 @@ context = super().get_context_data(**kwargs) context['project_list'] = Project.objects\ - .filter(organisation__partner=self.object) + .filter(organisation__partner=self.object, + is_archived=False, + is_draft=False) context['action_list'] = Action.objects\ .filter(project__organisation__partner=self.object)\
{"golden_diff": "diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py\n--- a/liqd_product/apps/partners/views.py\n+++ b/liqd_product/apps/partners/views.py\n@@ -20,7 +20,9 @@\n context = super().get_context_data(**kwargs)\n \n context['project_list'] = Project.objects\\\n- .filter(organisation__partner=self.object)\n+ .filter(organisation__partner=self.object,\n+ is_archived=False,\n+ is_draft=False)\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n", "issue": "[partner landing page] unpublished and archived projects are shown\nOn the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden per default.\r\n\r\nSee: https://product-dev.liqd.net/teststadt/\r\n\r\n![bildschirmfoto 2018-05-28 um 11 33 51](https://user-images.githubusercontent.com/15341015/40608238-2254983e-626b-11e8-8429-588c014f7a82.png)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom liqd_product.apps.partners.models import Partner\n\nfrom . import forms\n\n\nclass PartnerView(DetailView):\n template_name = 'partner_landing_page.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['project_list'] = Project.objects\\\n .filter(organisation__partner=self.object)\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'partner_information.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'partner_imprint.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass PartnerUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Partner\n form_class = forms.PartnerForm\n slug_url_kwarg = 'partner_slug'\n template_name = 'partner_form.html'\n success_message = _('Municipality successfully updated.')\n permission_required = 'liqd_product_partners.change_partner'\n menu_item = 'partner'\n\n def get_success_url(self):\n return self.request.path\n", "path": "liqd_product/apps/partners/views.py"}]}
1,233
136
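The fix above narrows the landing-page queryset so drafts never appear and archived projects are hidden by default. The same guard, factored into a helper as a minimal sketch — `Project` and both boolean fields come from the record, the helper itself is invented.

```python
from adhocracy4.projects.models import Project

def visible_projects(partner):
    # Unpublished (draft) projects must never be listed;
    # archived projects are hidden by default.
    return Project.objects.filter(
        organisation__partner=partner,
        is_draft=False,
        is_archived=False,
    )
```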
gh_patches_debug_13696
rasdani/github-patches
git_diff
enthought__chaco-634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dont use traitsui.api as tui https://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed. </issue> <code> [start of chaco/plugin/plot_editor.py] 1 from chaco.shell.scaly_plot import ScalyPlot 2 from enable.component_editor import ComponentEditor 3 from pyface.workbench.api import TraitsUIEditor 4 from traits.api import Any, Enum, HasTraits, Property, Str 5 from traitsui import api as tui 6 7 8 class PlotUI(HasTraits): 9 """Simple Traits UI proxy for a Chaco plot.""" 10 11 # The plot. 12 component = Any() 13 14 traits_view = tui.View( 15 tui.Item("component", editor=ComponentEditor(), show_label=False), 16 resizable=True, 17 ) 18 19 20 class PlotEditor(TraitsUIEditor): 21 """A Workbench Editor showing a Chaco plot for the shell interface.""" 22 23 bgcolor = Str("white") 24 image_default_origin = Enum( 25 "bottom left", "top left", "bottom right", "top right" 26 ) 27 28 # The plot. 29 component = Property(Any) 30 container = Property(Any) 31 32 # The PlotData. 33 data = Any() 34 35 # The PlotSession of which we are a part. We need to know this in order 36 # to notify it of our being closed, etc. 37 session = Any() 38 39 def __init__( 40 self, 41 is_image=False, 42 bgcolor="white", 43 image_default_origin="top left", 44 *args, 45 **kw 46 ): 47 48 super(TraitsUIEditor, self).__init__(**kw) 49 50 # Some defaults which should be overridden by preferences. 51 self.bgcolor = bgcolor 52 self.image_default_origin = image_default_origin 53 54 # Create an empty top-level container 55 if is_image: 56 top_container = self._create_top_img_container() 57 else: 58 top_container = self._create_top_container() 59 60 self.obj = PlotUI(component=top_container) 61 62 #### PlotWindow interface ################################################## 63 64 def get_container(self): 65 return self.obj.component 66 67 def set_container(self, container): 68 self.obj.component = container 69 70 def iconize(self, iconize): 71 """Iconizes the window if *iconize* is True. 72 73 Do nothing in this implementation. 74 """ 75 76 def maximize(self, maximize): 77 """If *maximize* is True, maximizes the window size; restores if False. 78 79 Do nothing in this implementation. 
80 """ 81 82 def set_size(self, width, height): 83 pass 84 85 def set_title(self, title): 86 self.name = title 87 88 def raise_window(self): 89 self.window.activate_editor(self) 90 91 #### Editor interface ###################################################### 92 93 def destroy_control(self): 94 """Destroy the toolkit-specific control that represents the part.""" 95 self._on_window_close() 96 super(TraitsUIEditor, self).destroy_control() 97 98 #### Private interface ##################################################### 99 100 def _get_container(self): 101 return self.obj.component 102 103 def _set_container(self, value): 104 self.obj.component = value 105 106 def _get_component(self): 107 return self.obj.component 108 109 def _set_component(self, value): 110 self.obj.component = value 111 112 def _create_top_container(self): 113 plot = ScalyPlot( 114 padding=50, 115 fill_padding=True, 116 bgcolor=self.bgcolor, 117 use_backbuffer=True, 118 ) 119 return plot 120 121 def _create_top_img_container(self): 122 plot = ScalyPlot( 123 padding=50, 124 fill_padding=True, 125 bgcolor=self.bgcolor, 126 use_backbuffer=True, 127 default_origin=self.image_default_origin, 128 ) 129 return plot 130 131 def _on_window_close(self): 132 if self.session: 133 try: 134 ndx = self.session.windows.index(self) 135 self.session.del_window(ndx) 136 except ValueError: 137 pass 138 [end of chaco/plugin/plot_editor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py --- a/chaco/plugin/plot_editor.py +++ b/chaco/plugin/plot_editor.py @@ -2,7 +2,7 @@ from enable.component_editor import ComponentEditor from pyface.workbench.api import TraitsUIEditor from traits.api import Any, Enum, HasTraits, Property, Str -from traitsui import api as tui +from traitsui.api import Item, View class PlotUI(HasTraits): @@ -11,8 +11,8 @@ # The plot. component = Any() - traits_view = tui.View( - tui.Item("component", editor=ComponentEditor(), show_label=False), + traits_view = View( + Item("component", editor=ComponentEditor(), show_label=False), resizable=True, )
{"golden_diff": "diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py\n--- a/chaco/plugin/plot_editor.py\n+++ b/chaco/plugin/plot_editor.py\n@@ -2,7 +2,7 @@\n from enable.component_editor import ComponentEditor\n from pyface.workbench.api import TraitsUIEditor\n from traits.api import Any, Enum, HasTraits, Property, Str\n-from traitsui import api as tui\n+from traitsui.api import Item, View\n \n \n class PlotUI(HasTraits):\n@@ -11,8 +11,8 @@\n # The plot.\n component = Any()\n \n- traits_view = tui.View(\n- tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n+ traits_view = View(\n+ Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n", "issue": "Dont use traitsui.api as tui\nhttps://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed.\n", "before_files": [{"content": "from chaco.shell.scaly_plot import ScalyPlot\nfrom enable.component_editor import ComponentEditor\nfrom pyface.workbench.api import TraitsUIEditor\nfrom traits.api import Any, Enum, HasTraits, Property, Str\nfrom traitsui import api as tui\n\n\nclass PlotUI(HasTraits):\n \"\"\"Simple Traits UI proxy for a Chaco plot.\"\"\"\n\n # The plot.\n component = Any()\n\n traits_view = tui.View(\n tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n\n\nclass PlotEditor(TraitsUIEditor):\n \"\"\"A Workbench Editor showing a Chaco plot for the shell interface.\"\"\"\n\n bgcolor = Str(\"white\")\n image_default_origin = Enum(\n \"bottom left\", \"top left\", \"bottom right\", \"top right\"\n )\n\n # The plot.\n component = Property(Any)\n container = Property(Any)\n\n # The PlotData.\n data = Any()\n\n # The PlotSession of which we are a part. 
We need to know this in order\n # to notify it of our being closed, etc.\n session = Any()\n\n def __init__(\n self,\n is_image=False,\n bgcolor=\"white\",\n image_default_origin=\"top left\",\n *args,\n **kw\n ):\n\n super(TraitsUIEditor, self).__init__(**kw)\n\n # Some defaults which should be overridden by preferences.\n self.bgcolor = bgcolor\n self.image_default_origin = image_default_origin\n\n # Create an empty top-level container\n if is_image:\n top_container = self._create_top_img_container()\n else:\n top_container = self._create_top_container()\n\n self.obj = PlotUI(component=top_container)\n\n #### PlotWindow interface ##################################################\n\n def get_container(self):\n return self.obj.component\n\n def set_container(self, container):\n self.obj.component = container\n\n def iconize(self, iconize):\n \"\"\"Iconizes the window if *iconize* is True.\n\n Do nothing in this implementation.\n \"\"\"\n\n def maximize(self, maximize):\n \"\"\"If *maximize* is True, maximizes the window size; restores if False.\n\n Do nothing in this implementation.\n \"\"\"\n\n def set_size(self, width, height):\n pass\n\n def set_title(self, title):\n self.name = title\n\n def raise_window(self):\n self.window.activate_editor(self)\n\n #### Editor interface ######################################################\n\n def destroy_control(self):\n \"\"\"Destroy the toolkit-specific control that represents the part.\"\"\"\n self._on_window_close()\n super(TraitsUIEditor, self).destroy_control()\n\n #### Private interface #####################################################\n\n def _get_container(self):\n return self.obj.component\n\n def _set_container(self, value):\n self.obj.component = value\n\n def _get_component(self):\n return self.obj.component\n\n def _set_component(self, value):\n self.obj.component = value\n\n def _create_top_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n )\n return plot\n\n def _create_top_img_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n default_origin=self.image_default_origin,\n )\n return plot\n\n def _on_window_close(self):\n if self.session:\n try:\n ndx = self.session.windows.index(self)\n self.session.del_window(ndx)\n except ValueError:\n pass\n", "path": "chaco/plugin/plot_editor.py"}]}
1,720
189
gh_patches_debug_35309
rasdani/github-patches
git_diff
nvaccess__nvda-9119
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Windows Store apps: use app title as product name instead of wwahost **Reported by nvdakor on 2014-07-07 13:10** Hi, Currently, when invoking appModule.productName for a Windows Store app, NVDA says "Windows operating system". Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName. Before: 1. Go to Windows Store app such as Weather. 2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName. After: 1. Open any Windows Store app. 2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app. Implementation: 1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name). 2. wwahost.AppModule.productName will contain the actual title of the app. Thanks. **Blocked by #4569** Load specific app modules for apps hosted by wwahost.exe **Reported by jteh on 2014-10-23 07:35** wwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name. I think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a "!" character, so we'll need to check whether Python will cope with this and replace if not. **Blocked by #4360** **Blocking #4259** </issue> <code> [start of source/appModules/wwahost.py] 1 #A part of NonVisual Desktop Access (NVDA) 2 #This file is covered by the GNU General Public License. 3 #See the file COPYING for more details. 4 #Copyright (C) 2012 NV Access Limited 5 6 from comtypes import COMError 7 import IAccessibleHandler 8 from NVDAObjects.IAccessible.MSHTML import Body 9 import appModuleHandler 10 import controlTypes 11 import winUser 12 13 class AppModule(appModuleHandler.AppModule): 14 15 def event_NVDAObject_init(self,obj): 16 #The root document of HTML Metro Apps must be treeted as an application. 17 if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server": 18 try: 19 paccParent=obj.IAccessibleObject.accParent.accParent 20 identity=IAccessibleHandler.getIAccIdentity(paccParent,0) 21 except (COMError,AttributeError): 22 identity=None 23 if identity: 24 windowHandle=identity.get('windowHandle') 25 if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding": 26 obj.role=controlTypes.ROLE_APPLICATION 27 [end of source/appModules/wwahost.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py --- a/source/appModules/wwahost.py +++ b/source/appModules/wwahost.py @@ -1,26 +1,40 @@ -#A part of NonVisual Desktop Access (NVDA) -#This file is covered by the GNU General Public License. -#See the file COPYING for more details. -#Copyright (C) 2012 NV Access Limited +# A part of NonVisual Desktop Access (NVDA) +# This file is covered by the GNU General Public License. +# See the file COPYING for more details. +# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee -from comtypes import COMError -import IAccessibleHandler -from NVDAObjects.IAccessible.MSHTML import Body +"""App module host for Windows 8.x and 10 apps hosted by wwahost.exe. +In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps. +In Windows 10, progressive web apps (PWA) and friends are hosted inside this process. +App modules wishing to support apps hosted inside this process must subclass the AppModule class. +""" + +import ctypes import appModuleHandler -import controlTypes -import winUser +import winKernel + + +def getAppNameFromHost(processId): + # Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe. + # App modules for these are named after the hosted app name. + processHandle = winKernel.openProcess( + winKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId + ) + length = ctypes.c_uint() + winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None) + appModel = ctypes.create_unicode_buffer(length.value) + winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel) + winKernel.closeHandle(processHandle) + # Sometimes app model might be empty, so raise errors and fall back to wwahost. + if not appModel.value: + raise LookupError + # App model is shown as familyName!appName, + # and importing files with the exclamation point in the middle of the name isn't supported. + # Therefore return only the app name portion. + # Convert this into lowercase to make the file name consistent with other NVDA app modules. + return appModel.value.split("!")[-1].lower() class AppModule(appModuleHandler.AppModule): - def event_NVDAObject_init(self,obj): - #The root document of HTML Metro Apps must be treeted as an application. - if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server": - try: - paccParent=obj.IAccessibleObject.accParent.accParent - identity=IAccessibleHandler.getIAccIdentity(paccParent,0) - except (COMError,AttributeError): - identity=None - if identity: - windowHandle=identity.get('windowHandle') - if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding": - obj.role=controlTypes.ROLE_APPLICATION + # WWAHost app content is treated as part of an app, not a browse mode document. + disableBrowseModeByDefault = True
{"golden_diff": "diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py\n--- a/source/appModules/wwahost.py\n+++ b/source/appModules/wwahost.py\n@@ -1,26 +1,40 @@\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n-#Copyright (C) 2012 NV Access Limited\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n+# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee\r\n \r\n-from comtypes import COMError\r\n-import IAccessibleHandler\r\n-from NVDAObjects.IAccessible.MSHTML import Body\r\n+\"\"\"App module host for Windows 8.x and 10 apps hosted by wwahost.exe.\r\n+In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps.\r\n+In Windows 10, progressive web apps (PWA) and friends are hosted inside this process.\r\n+App modules wishing to support apps hosted inside this process must subclass the AppModule class.\r\n+\"\"\"\r\n+\r\n+import ctypes\r\n import appModuleHandler\r\n-import controlTypes\r\n-import winUser\r\n+import winKernel\r\n+\r\n+\r\n+def getAppNameFromHost(processId):\r\n+\t# Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe.\r\n+\t# App modules for these are named after the hosted app name.\r\n+\tprocessHandle = winKernel.openProcess(\r\n+\t\twinKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId\r\n+\t)\r\n+\tlength = ctypes.c_uint()\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None)\r\n+\tappModel = ctypes.create_unicode_buffer(length.value)\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel)\r\n+\twinKernel.closeHandle(processHandle)\r\n+\t# Sometimes app model might be empty, so raise errors and fall back to wwahost.\r\n+\tif not appModel.value:\r\n+\t\traise LookupError\r\n+\t# App model is shown as familyName!appName,\r\n+\t# and importing files with the exclamation point in the middle of the name isn't supported.\r\n+\t# Therefore return only the app name portion.\r\n+\t# Convert this into lowercase to make the file name consistent with other NVDA app modules.\r\n+\treturn appModel.value.split(\"!\")[-1].lower()\r\n \r\n class AppModule(appModuleHandler.AppModule):\r\n \r\n-\tdef event_NVDAObject_init(self,obj):\r\n-\t\t#The root document of HTML Metro Apps must be treeted as an application. \r\n-\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n-\t\t\ttry:\r\n-\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n-\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n-\t\t\texcept (COMError,AttributeError):\r\n-\t\t\t\tidentity=None\r\n-\t\t\tif identity:\r\n-\t\t\t\twindowHandle=identity.get('windowHandle')\r\n-\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n-\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n+\t# WWAHost app content is treated as part of an app, not a browse mode document.\r\n+\tdisableBrowseModeByDefault = True\n", "issue": "Windows Store apps: use app title as product name instead of wwahost\n**Reported by nvdakor on 2014-07-07 13:10**\nHi,\nCurrently, when invoking appModule.productName for a Windows Store app, NVDA says \"Windows operating system\". 
Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName.\nBefore:\n1. Go to Windows Store app such as Weather.\n2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName.\nAfter:\n1. Open any Windows Store app.\n2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app.\nImplementation:\n1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name).\n2. wwahost.AppModule.productName will contain the actual title of the app.\nThanks.\n\n**Blocked by #4569**\n\nLoad specific app modules for apps hosted by wwahost.exe\n**Reported by jteh on 2014-10-23 07:35**\nwwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name.\n\nI think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a \"!\" character, so we'll need to check whether Python will cope with this and replace if not.\n**Blocked by #4360**\n**Blocking #4259**\n\n", "before_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n#Copyright (C) 2012 NV Access Limited\r\n\r\nfrom comtypes import COMError\r\nimport IAccessibleHandler\r\nfrom NVDAObjects.IAccessible.MSHTML import Body\r\nimport appModuleHandler\r\nimport controlTypes\r\nimport winUser\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\t#The root document of HTML Metro Apps must be treeted as an application. \r\n\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n\t\t\ttry:\r\n\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n\t\t\texcept (COMError,AttributeError):\r\n\t\t\t\tidentity=None\r\n\t\t\tif identity:\r\n\t\t\t\twindowHandle=identity.get('windowHandle')\r\n\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n", "path": "source/appModules/wwahost.py"}]}
1,232
753
gh_patches_debug_34722
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Transform BytesCodec into a general codec. now the BytesCodec is for mnist dataset specified, and it should be modified to encode/decode any dataset using bytes format. </issue> <code> [start of record_codec/__init__.py] 1 from record_codec.tf_example_codec import TFExampleCodec 2 3 __all__=[TFExampleCodec] 4 [end of record_codec/__init__.py] [start of record_codec/bytes_codec.py] 1 import numpy as np 2 3 N = 28 4 5 6 # TODO: maybe use TF variant tensor to do more flexible encoding. 7 class BytesCodec(object): 8 def encode(self, data): 9 values = [] 10 for _, f_value in data: 11 values.append(f_value) 12 return np.concatenate(values, axis=None).tobytes() 13 14 def decode(self, record): 15 parsed = np.frombuffer(record, dtype="uint8") 16 assert len(parsed) == N * N + 1 17 label = parsed[-1] 18 parsed = np.resize(parsed[:-1], new_shape=(N, N)) 19 return {'image': parsed, 'label': label} 20 21 def show(self, data, label): 22 """Print the image and label on terminal for debugging""" 23 assert data.shape == (N, N) and data.dtype == "uint8" 24 assert label >= 0 and label <= 9 25 26 def grey(x): 27 return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m" 28 29 for line in data: 30 s = "".join(grey(x) for x in line) 31 print(s) 32 print("label =", label) 33 [end of record_codec/bytes_codec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/record_codec/__init__.py b/record_codec/__init__.py --- a/record_codec/__init__.py +++ b/record_codec/__init__.py @@ -1,3 +1,4 @@ +from record_codec.bytes_codec import BytesCodec from record_codec.tf_example_codec import TFExampleCodec -__all__=[TFExampleCodec] +__all__=[BytesCodec, TFExampleCodec] diff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py --- a/record_codec/bytes_codec.py +++ b/record_codec/bytes_codec.py @@ -1,32 +1,42 @@ import numpy as np -N = 28 - -# TODO: maybe use TF variant tensor to do more flexible encoding. class BytesCodec(object): + def __init__(self, feature_columns): + self._feature_columns = feature_columns + self._col_id = { + c.name: order for order, c in enumerate(feature_columns) + } + def encode(self, data): - values = [] - for _, f_value in data: - values.append(f_value) - return np.concatenate(values, axis=None).tobytes() + # Rearrange the data in order of the columns. + values = [None] * len(self._feature_columns) + for f_name, f_value in data: + col_id = self._col_id[f_name] + column = self._feature_columns[col_id] + if column.dtype != f_value.dtype or column.shape != f_value.shape: + raise ValueError( + "Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)" % ( + f_name, column.dtype, column.shape, f_value.dtype, f_value.shape) + ) + values[col_id] = f_value.tobytes() + for id, value in enumerate(values): + if value is None: + raise ValueError( + "Missing value for column: %s", + self._col_id[id].name + ) + return b"".join(values) def decode(self, record): - parsed = np.frombuffer(record, dtype="uint8") - assert len(parsed) == N * N + 1 - label = parsed[-1] - parsed = np.resize(parsed[:-1], new_shape=(N, N)) - return {'image': parsed, 'label': label} - - def show(self, data, label): - """Print the image and label on terminal for debugging""" - assert data.shape == (N, N) and data.dtype == "uint8" - assert label >= 0 and label <= 9 - - def grey(x): - return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m" - - for line in data: - s = "".join(grey(x) for x in line) - print(s) - print("label =", label) + offset = 0 + res = {} + for c in self._feature_columns: + count = np.prod(c.shape) + res[c.name] = np.frombuffer( + record, + dtype=c.dtype.as_numpy_dtype, + count=count, + offset=offset).reshape(c.shape) + offset += count * c.dtype.size + return res
{"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -1,3 +1,4 @@\n+from record_codec.bytes_codec import BytesCodec\n from record_codec.tf_example_codec import TFExampleCodec\n \n-__all__=[TFExampleCodec]\n+__all__=[BytesCodec, TFExampleCodec]\ndiff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py\n--- a/record_codec/bytes_codec.py\n+++ b/record_codec/bytes_codec.py\n@@ -1,32 +1,42 @@\n import numpy as np\n \n-N = 28\n \n-\n-# TODO: maybe use TF variant tensor to do more flexible encoding.\n class BytesCodec(object):\n+ def __init__(self, feature_columns):\n+ self._feature_columns = feature_columns\n+ self._col_id = {\n+ c.name: order for order, c in enumerate(feature_columns)\n+ }\n+\n def encode(self, data):\n- values = [] \n- for _, f_value in data:\n- values.append(f_value)\n- return np.concatenate(values, axis=None).tobytes()\n+ # Rearrange the data in order of the columns.\n+ values = [None] * len(self._feature_columns)\n+ for f_name, f_value in data:\n+ col_id = self._col_id[f_name]\n+ column = self._feature_columns[col_id]\n+ if column.dtype != f_value.dtype or column.shape != f_value.shape:\n+ raise ValueError(\n+ \"Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)\" % (\n+ f_name, column.dtype, column.shape, f_value.dtype, f_value.shape)\n+ )\n+ values[col_id] = f_value.tobytes()\n+ for id, value in enumerate(values):\n+ if value is None:\n+ raise ValueError(\n+ \"Missing value for column: %s\",\n+ self._col_id[id].name\n+ )\n+ return b\"\".join(values)\n \n def decode(self, record):\n- parsed = np.frombuffer(record, dtype=\"uint8\")\n- assert len(parsed) == N * N + 1\n- label = parsed[-1]\n- parsed = np.resize(parsed[:-1], new_shape=(N, N))\n- return {'image': parsed, 'label': label}\n-\n- def show(self, data, label):\n- \"\"\"Print the image and label on terminal for debugging\"\"\"\n- assert data.shape == (N, N) and data.dtype == \"uint8\"\n- assert label >= 0 and label <= 9\n-\n- def grey(x):\n- return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n-\n- for line in data:\n- s = \"\".join(grey(x) for x in line)\n- print(s)\n- print(\"label =\", label)\n+ offset = 0\n+ res = {}\n+ for c in self._feature_columns:\n+ count = np.prod(c.shape)\n+ res[c.name] = np.frombuffer(\n+ record,\n+ dtype=c.dtype.as_numpy_dtype,\n+ count=count,\n+ offset=offset).reshape(c.shape)\n+ offset += count * c.dtype.size\n+ return res\n", "issue": "Transform BytesCodec into a general codec.\nnow the BytesCodec is for mnist dataset specified, and it should be modified to encode/decode any dataset using bytes format.\n", "before_files": [{"content": "from record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "import numpy as np\n\nN = 28\n\n\n# TODO: maybe use TF variant tensor to do more flexible encoding.\nclass BytesCodec(object):\n def encode(self, data):\n values = [] \n for _, f_value in data:\n values.append(f_value)\n return np.concatenate(values, axis=None).tobytes()\n\n def decode(self, record):\n parsed = np.frombuffer(record, dtype=\"uint8\")\n assert len(parsed) == N * N + 1\n label = parsed[-1]\n parsed = np.resize(parsed[:-1], new_shape=(N, N))\n return {'image': parsed, 'label': label}\n\n def show(self, data, label):\n \"\"\"Print the image and label on terminal for debugging\"\"\"\n assert data.shape == (N, N) and data.dtype == \"uint8\"\n assert label >= 0 and 
label <= 9\n\n def grey(x):\n return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n\n for line in data:\n s = \"\".join(grey(x) for x in line)\n print(s)\n print(\"label =\", label)\n", "path": "record_codec/bytes_codec.py"}]}
938
766
gh_patches_debug_1859
rasdani/github-patches
git_diff
NVIDIA__NVFlare-191
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The "show_stats" command got broken The "show_stats server" and "show_stats client" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162): > show_stats server Error: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict' Done [7269 usecs] 2022-02-08 17:26:12.865006 > </issue> <code> [start of nvflare/fuel/hci/client/api_spec.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import annotations 16 17 from abc import ABC, abstractmethod 18 from typing import Optional 19 20 from nvflare.fuel.hci.table import Table 21 22 23 class ReplyProcessor: 24 """A base class for parsing server's response.""" 25 26 def reply_start(self, api: AdminAPISpec, reply_json): 27 pass 28 29 def process_string(self, api: AdminAPISpec, item: str): 30 pass 31 32 def process_success(self, api: AdminAPISpec, item: str): 33 pass 34 35 def process_error(self, api: AdminAPISpec, err: str): 36 pass 37 38 def process_table(self, api: AdminAPISpec, table: Table): 39 pass 40 41 def process_shutdown(self, api: AdminAPISpec, msg: str): 42 pass 43 44 def process_token(self, api: AdminAPISpec, token: str): 45 pass 46 47 def protocol_error(self, api: AdminAPISpec, err: str): 48 pass 49 50 def reply_done(self, api: AdminAPISpec): 51 pass 52 53 54 class AdminAPISpec(ABC): 55 def __init__(self): 56 self.reply_processor = None 57 self.command_result = None 58 59 @abstractmethod 60 def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None): 61 """Executes a command on server side. 62 63 Args: 64 command: The command to be executed. 65 reply_processor: Reply callback to use. 66 """ 67 pass 68 69 def set_command_result(self, result): 70 """Sets the result returning from executing the command.""" 71 self.command_result = result 72 73 def get_command_result(self): 74 """Gets the result returning from executing the command.""" 75 return self.command_result 76 [end of nvflare/fuel/hci/client/api_spec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py --- a/nvflare/fuel/hci/client/api_spec.py +++ b/nvflare/fuel/hci/client/api_spec.py @@ -38,6 +38,9 @@ def process_table(self, api: AdminAPISpec, table: Table): pass + def process_dict(self, api: AdminAPISpec, data: dict): + pass + def process_shutdown(self, api: AdminAPISpec, msg: str): pass
{"golden_diff": "diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py\n--- a/nvflare/fuel/hci/client/api_spec.py\n+++ b/nvflare/fuel/hci/client/api_spec.py\n@@ -38,6 +38,9 @@\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n \n+ def process_dict(self, api: AdminAPISpec, data: dict):\n+ pass\n+\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n", "issue": "The \"show_stats\" command got broken\nThe \"show_stats server\" and \"show_stats client\" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162):\r\n\r\n> show_stats server\r\nError: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict'\r\nDone [7269 usecs] 2022-02-08 17:26:12.865006\r\n> \r\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py"}]}
1,342
132
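The two-line fix above only adds a `process_dict` hook so replies carrying dict payloads — which `show_stats` now produces — have a callback to land in. A hedged sketch of a processor that uses it; the subclassing pattern follows the `ReplyProcessor` base shown in the record, but `StatsReplyProcessor` itself is invented for illustration.

```python
class StatsReplyProcessor(ReplyProcessor):
    """Accumulates dict-typed reply items such as show_stats output."""

    def reply_start(self, api, reply_json):
        self.stats = {}

    def process_dict(self, api, data: dict):
        self.stats.update(data)
        api.set_command_result(self.stats)
```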
gh_patches_debug_39747
rasdani/github-patches
git_diff
NVIDIA__NVFlare-359
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Minor inconsistency between study config generation tool and study spec </issue> <code> [start of nvflare/apis/study_manager_spec.py] 1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import datetime 16 17 18 class Study: 19 def __init__( 20 self, 21 name: str, 22 description: str, 23 sites: [str], 24 users: [str], 25 start_time: datetime.datetime, 26 end_time: datetime.datetime, 27 reviewers=None, 28 ): 29 self.name = name 30 self.description = description 31 self.sites = sites 32 self.users = users 33 self.start_time = start_time 34 self.end_time = end_time 35 self.reviewers = reviewers 36 self.create_time = None 37 38 39 class StudyManagerSpec(object): 40 def create_study(self, study: Study) -> Study: 41 """Create the study object permanently 42 43 The caller must have validated the sites and users of the study. 44 45 Validate the study before saving: 46 The name of the study must be unique; 47 Sites and users must be defined; 48 Start and end time must make sense. 49 50 Args: 51 study: the caller-provided study info 52 53 Returns: updated study info (e.g. create_time is set) 54 55 """ 56 pass 57 58 def list_studies(self) -> [str]: 59 """ 60 List names of all defined studies 61 62 Returns: list of study names 63 64 """ 65 pass 66 67 def list_active_studies(self) -> [str]: 68 """ 69 List names of all active studies (started but not ended) 70 71 Returns: list of study names 72 73 """ 74 pass 75 76 def get_study(self, name: str) -> Study: 77 """Get the Study object for the specified name. 78 79 Args: 80 name: unique name of the study 81 82 Returns: the Study object 83 84 """ 85 pass 86 [end of nvflare/apis/study_manager_spec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py --- a/nvflare/apis/study_manager_spec.py +++ b/nvflare/apis/study_manager_spec.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import datetime +from abc import ABC, abstractmethod +from typing import Dict, List +from datetime import datetime + +from .fl_context import FLContext class Study: @@ -20,32 +24,35 @@ self, name: str, description: str, - sites: [str], - users: [str], - start_time: datetime.datetime, - end_time: datetime.datetime, + contact: str, + participating_clients: List[str], + participating_admins: List[str], + start_date: datetime.date, + end_date: datetime.date, reviewers=None, ): self.name = name self.description = description - self.sites = sites - self.users = users - self.start_time = start_time - self.end_time = end_time + self.contact = contact + self.participating_clients = participating_clients + self.participating_admins = participating_admins + self.start_date = start_date + self.end_date = end_date self.reviewers = reviewers - self.create_time = None + self.created_at = datetime.utcnow().isoformat() -class StudyManagerSpec(object): - def create_study(self, study: Study) -> Study: - """Create the study object permanently +class StudyManagerSpec(ABC): + @abstractmethod + def add_study(self, study: Study, fl_ctx: FLContext) -> Study: + """Add the study object permanently - The caller must have validated the sites and users of the study. + The caller must have validated the participating_clients and participating_admins of the study. Validate the study before saving: The name of the study must be unique; - Sites and users must be defined; - Start and end time must make sense. + participating_clients and participating_admins must be defined; + Start and end date must make sense. Args: study: the caller-provided study info @@ -55,7 +62,8 @@ """ pass - def list_studies(self) -> [str]: + @abstractmethod + def list_studies(self, fl_ctx: FLContext) -> List[str]: """ List names of all defined studies @@ -64,7 +72,8 @@ """ pass - def list_active_studies(self) -> [str]: + @abstractmethod + def list_active_studies(self, fl_ctx: FLContext) -> List[str]: """ List names of all active studies (started but not ended) @@ -73,7 +82,8 @@ """ pass - def get_study(self, name: str) -> Study: + @abstractmethod + def get_study(self, name: str, fl_ctx: FLContext) -> Study: """Get the Study object for the specified name. Args:
{"golden_diff": "diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py\n--- a/nvflare/apis/study_manager_spec.py\n+++ b/nvflare/apis/study_manager_spec.py\n@@ -12,7 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import datetime\n+from abc import ABC, abstractmethod\n+from typing import Dict, List\n+from datetime import datetime\n+\n+from .fl_context import FLContext\n \n \n class Study:\n@@ -20,32 +24,35 @@\n self,\n name: str,\n description: str,\n- sites: [str],\n- users: [str],\n- start_time: datetime.datetime,\n- end_time: datetime.datetime,\n+ contact: str,\n+ participating_clients: List[str],\n+ participating_admins: List[str],\n+ start_date: datetime.date,\n+ end_date: datetime.date,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n- self.sites = sites\n- self.users = users\n- self.start_time = start_time\n- self.end_time = end_time\n+ self.contact = contact\n+ self.participating_clients = participating_clients\n+ self.participating_admins = participating_admins\n+ self.start_date = start_date\n+ self.end_date = end_date\n self.reviewers = reviewers\n- self.create_time = None\n+ self.created_at = datetime.utcnow().isoformat()\n \n \n-class StudyManagerSpec(object):\n- def create_study(self, study: Study) -> Study:\n- \"\"\"Create the study object permanently\n+class StudyManagerSpec(ABC):\n+ @abstractmethod\n+ def add_study(self, study: Study, fl_ctx: FLContext) -> Study:\n+ \"\"\"Add the study object permanently\n \n- The caller must have validated the sites and users of the study.\n+ The caller must have validated the participating_clients and participating_admins of the study.\n \n Validate the study before saving:\n The name of the study must be unique;\n- Sites and users must be defined;\n- Start and end time must make sense.\n+ participating_clients and participating_admins must be defined;\n+ Start and end date must make sense.\n \n Args:\n study: the caller-provided study info\n@@ -55,7 +62,8 @@\n \"\"\"\n pass\n \n- def list_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all defined studies\n \n@@ -64,7 +72,8 @@\n \"\"\"\n pass\n \n- def list_active_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_active_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all active studies (started but not ended)\n \n@@ -73,7 +82,8 @@\n \"\"\"\n pass\n \n- def get_study(self, name: str) -> Study:\n+ @abstractmethod\n+ def get_study(self, name: str, fl_ctx: FLContext) -> Study:\n \"\"\"Get the Study object for the specified name.\n \n Args:\n", "issue": "Minor inconsistency between study config generation tool and study spec\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\n\nclass Study:\n def __init__(\n self,\n name: str,\n description: str,\n sites: [str],\n users: [str],\n start_time: datetime.datetime,\n end_time: datetime.datetime,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n self.sites = sites\n self.users = users\n self.start_time = start_time\n self.end_time = end_time\n self.reviewers = reviewers\n self.create_time = None\n\n\nclass StudyManagerSpec(object):\n def create_study(self, study: Study) -> Study:\n \"\"\"Create the study object permanently\n\n The caller must have validated the sites and users of the study.\n\n Validate the study before saving:\n The name of the study must be unique;\n Sites and users must be defined;\n Start and end time must make sense.\n\n Args:\n study: the caller-provided study info\n\n Returns: updated study info (e.g. create_time is set)\n\n \"\"\"\n pass\n\n def list_studies(self) -> [str]:\n \"\"\"\n List names of all defined studies\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def list_active_studies(self) -> [str]:\n \"\"\"\n List names of all active studies (started but not ended)\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def get_study(self, name: str) -> Study:\n \"\"\"Get the Study object for the specified name.\n\n Args:\n name: unique name of the study\n\n Returns: the Study object\n\n \"\"\"\n pass\n", "path": "nvflare/apis/study_manager_spec.py"}]}
1,221
725
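A minimal usage sketch of the patched `Study` constructor from the golden diff above; all field values are invented placeholders, and it assumes a build of nvflare that includes this patch.

```python
from datetime import date

from nvflare.apis.study_manager_spec import Study  # patched module

study = Study(
    name="example-study",                       # placeholder values throughout
    description="Illustrative federated study",
    contact="admin@example.com",
    participating_clients=["site-1", "site-2"],
    participating_admins=["admin@example.com"],
    start_date=date(2022, 1, 1),
    end_date=date(2022, 6, 30),
)
print(study.created_at)  # ISO-8601 string set by the patched __init__
```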
gh_patches_debug_17658
rasdani/github-patches
git_diff
pantsbuild__pants-12060
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0]. In short, the `run` goal executes via ~: ``` export PEX_PATH=/path/to/requirements.pex export PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2 ./only-contains-entry-point-metadata.pex ``` If the executed code then tries to re-execute via argv[0] (the PEX file itself), then sys.path scrubbing is engaged which strips back off the PEX_PATH and PEX_EXTRA_SYS_PATH triggered sys.path additions since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter or else it needs to set this option for `pants run` unconditionally. The concrete example of apps that re-exec via sys.argv[0] are django manage.py apps. See https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out. </issue> <code> [start of src/python/pants/backend/python/goals/run_pex_binary.py] 1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 import os 5 6 from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet 7 from pants.backend.python.target_types import ( 8 PexBinaryDefaults, 9 ResolvedPexEntryPoint, 10 ResolvePexEntryPointRequest, 11 ) 12 from pants.backend.python.util_rules.pex import Pex, PexRequest 13 from pants.backend.python.util_rules.pex_environment import PexEnvironment 14 from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest 15 from pants.backend.python.util_rules.python_sources import ( 16 PythonSourceFiles, 17 PythonSourceFilesRequest, 18 ) 19 from pants.core.goals.run import RunFieldSet, RunRequest 20 from pants.engine.fs import Digest, MergeDigests 21 from pants.engine.rules import Get, MultiGet, collect_rules, rule 22 from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest 23 from pants.engine.unions import UnionRule 24 from pants.util.logging import LogLevel 25 26 27 @rule(level=LogLevel.DEBUG) 28 async def create_pex_binary_run_request( 29 field_set: PexBinaryFieldSet, 30 pex_binary_defaults: PexBinaryDefaults, 31 pex_env: PexEnvironment, 32 ) -> RunRequest: 33 entry_point, transitive_targets = await MultiGet( 34 Get( 35 ResolvedPexEntryPoint, 36 ResolvePexEntryPointRequest(field_set.entry_point), 37 ), 38 Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])), 39 ) 40 41 # Note that we get an intermediate PexRequest here (instead of going straight to a Pex) 42 # so that we can get the interpreter constraints for use in runner_pex_request. 
43 requirements_pex_request = await Get( 44 PexRequest, 45 PexFromTargetsRequest, 46 PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True), 47 ) 48 49 requirements_request = Get(Pex, PexRequest, requirements_pex_request) 50 51 sources_request = Get( 52 PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True) 53 ) 54 55 output_filename = f"{field_set.address.target_name}.pex" 56 runner_pex_request = Get( 57 Pex, 58 PexRequest( 59 output_filename=output_filename, 60 interpreter_constraints=requirements_pex_request.interpreter_constraints, 61 additional_args=field_set.generate_additional_args(pex_binary_defaults), 62 internal_only=True, 63 # Note that the entry point file is not in the PEX itself. It's loaded by setting 64 # `PEX_EXTRA_SYS_PATH`. 65 # TODO(John Sirois): Support ConsoleScript in PexBinary targets: 66 # https://github.com/pantsbuild/pants/issues/11619 67 main=entry_point.val, 68 ), 69 ) 70 71 requirements, sources, runner_pex = await MultiGet( 72 requirements_request, sources_request, runner_pex_request 73 ) 74 75 merged_digest = await Get( 76 Digest, 77 MergeDigests( 78 [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest] 79 ), 80 ) 81 82 def in_chroot(relpath: str) -> str: 83 return os.path.join("{chroot}", relpath) 84 85 args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python) 86 87 chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots] 88 extra_env = { 89 **pex_env.environment_dict(python_configured=runner_pex.python is not None), 90 "PEX_PATH": in_chroot(requirements_pex_request.output_filename), 91 "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots), 92 } 93 94 return RunRequest(digest=merged_digest, args=args, extra_env=extra_env) 95 96 97 def rules(): 98 return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)] 99 [end of src/python/pants/backend/python/goals/run_pex_binary.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py --- a/src/python/pants/backend/python/goals/run_pex_binary.py +++ b/src/python/pants/backend/python/goals/run_pex_binary.py @@ -58,7 +58,13 @@ PexRequest( output_filename=output_filename, interpreter_constraints=requirements_pex_request.interpreter_constraints, - additional_args=field_set.generate_additional_args(pex_binary_defaults), + additional_args=( + *field_set.generate_additional_args(pex_binary_defaults), + # N.B.: Since we cobble together the runtime environment via PEX_PATH and + # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that + # these environment variables are not stripped. + "--no-strip-pex-env", + ), internal_only=True, # Note that the entry point file is not in the PEX itself. It's loaded by setting # `PEX_EXTRA_SYS_PATH`.
{"golden_diff": "diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py\n--- a/src/python/pants/backend/python/goals/run_pex_binary.py\n+++ b/src/python/pants/backend/python/goals/run_pex_binary.py\n@@ -58,7 +58,13 @@\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n- additional_args=field_set.generate_additional_args(pex_binary_defaults),\n+ additional_args=(\n+ *field_set.generate_additional_args(pex_binary_defaults),\n+ # N.B.: Since we cobble together the runtime environment via PEX_PATH and\n+ # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that\n+ # these environment variables are not stripped.\n+ \"--no-strip-pex-env\",\n+ ),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n", "issue": "`./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0].\nIn short, the `run` goal executes via ~:\r\n```\r\nexport PEX_PATH=/path/to/requirements.pex\r\nexport PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2\r\n./only-contains-entry-point-metadata.pex\r\n```\r\n\r\nIf the executed code then tries to re-execute via argv[0] (the PEX file itself), then sys.path scrubbing is engaged which strips back off the PEX_PATH and PEX_EXTRA_SYS_PATH triggered sys.path additions since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter or else it needs to set this option for `pants run` unconditionally.\r\n\r\nThe concrete example of apps that re-exec via sys.argv[0] are django manage.py apps.\r\n\r\nSee https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out.\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet\nfrom pants.backend.python.target_types import (\n PexBinaryDefaults,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunFieldSet, RunRequest\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\nfrom pants.engine.unions import UnionRule\nfrom pants.util.logging import LogLevel\n\n\n@rule(level=LogLevel.DEBUG)\nasync def create_pex_binary_run_request(\n field_set: PexBinaryFieldSet,\n pex_binary_defaults: PexBinaryDefaults,\n pex_env: PexEnvironment,\n) -> RunRequest:\n entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(field_set.entry_point),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),\n )\n\n # Note that we get an intermediate PexRequest here (instead of going straight to a Pex)\n # so that we can get the interpreter 
constraints for use in runner_pex_request.\n requirements_pex_request = await Get(\n PexRequest,\n PexFromTargetsRequest,\n PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True),\n )\n\n requirements_request = Get(Pex, PexRequest, requirements_pex_request)\n\n sources_request = Get(\n PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)\n )\n\n output_filename = f\"{field_set.address.target_name}.pex\"\n runner_pex_request = Get(\n Pex,\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n additional_args=field_set.generate_additional_args(pex_binary_defaults),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n # TODO(John Sirois): Support ConsoleScript in PexBinary targets:\n # https://github.com/pantsbuild/pants/issues/11619\n main=entry_point.val,\n ),\n )\n\n requirements, sources, runner_pex = await MultiGet(\n requirements_request, sources_request, runner_pex_request\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest]\n ),\n )\n\n def in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python)\n\n chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]\n extra_env = {\n **pex_env.environment_dict(python_configured=runner_pex.python is not None),\n \"PEX_PATH\": in_chroot(requirements_pex_request.output_filename),\n \"PEX_EXTRA_SYS_PATH\": \":\".join(chrooted_source_roots),\n }\n\n return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)\n\n\ndef rules():\n return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)]\n", "path": "src/python/pants/backend/python/goals/run_pex_binary.py"}]}
1,827
235
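A toy reproduction, not Pants code, of the re-exec pattern the issue describes; it assumes it is saved as an executable script (true for a PEX file, whose runtime also scrubbed PEX_PATH and PEX_EXTRA_SYS_PATH before `--no-strip-pex-env`).

```python
import os
import sys

# Re-exec via argv[0], the way a Django manage.py autoreloader does.
# Inside a PEX, the child of this exec used to start with a scrubbed
# environment, so sys.path additions from PEX_EXTRA_SYS_PATH vanished.
if os.environ.get("_ALREADY_REEXECED") != "1":
    os.environ["_ALREADY_REEXECED"] = "1"
    os.execv(sys.argv[0], sys.argv)  # requires argv[0] to be executable

print(sys.path)  # compare the output before and after --no-strip-pex-env
```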
gh_patches_debug_5087
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-3641
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve security contact webpage We need to improve our documentation about a user who found a security issue could contact us to report this vulnerability in a confidential way. This page should be clear regarding how to report the issue, how submit a patch (wihtout making it public) and what to do receive feedback / discuss about the solution. There is a page already but it's too poor: https://readthedocs.org/security/ </issue> <code> [start of readthedocs/urls.py] 1 # pylint: disable=missing-docstring 2 from __future__ import absolute_import 3 4 from functools import reduce 5 from operator import add 6 7 from django.conf.urls import url, include 8 from django.contrib import admin 9 from django.conf import settings 10 from django.conf.urls.static import static 11 from django.views.generic.base import TemplateView 12 from tastypie.api import Api 13 14 from readthedocs.api.base import (ProjectResource, UserResource, 15 VersionResource, FileResource) 16 from readthedocs.core.urls import docs_urls, core_urls, deprecated_urls 17 from readthedocs.core.views import (HomepageView, SupportView, 18 server_error_404, server_error_500) 19 from readthedocs.search import views as search_views 20 21 22 v1_api = Api(api_name='v1') 23 v1_api.register(UserResource()) 24 v1_api.register(ProjectResource()) 25 v1_api.register(VersionResource()) 26 v1_api.register(FileResource()) 27 28 admin.autodiscover() 29 30 handler404 = server_error_404 31 handler500 = server_error_500 32 33 basic_urls = [ 34 url(r'^$', HomepageView.as_view(), name='homepage'), 35 url(r'^support/', SupportView.as_view(), name='support'), 36 url(r'^security/', TemplateView.as_view(template_name='security.html')), 37 ] 38 39 rtd_urls = [ 40 url(r'^bookmarks/', include('readthedocs.bookmarks.urls')), 41 url(r'^search/$', search_views.elastic_search, name='search'), 42 url(r'^dashboard/', include('readthedocs.projects.urls.private')), 43 url(r'^profiles/', include('readthedocs.profiles.urls.public')), 44 url(r'^accounts/', include('readthedocs.profiles.urls.private')), 45 url(r'^accounts/', include('allauth.urls')), 46 url(r'^notifications/', include('readthedocs.notifications.urls')), 47 url(r'^accounts/gold/', include('readthedocs.gold.urls')), 48 # For redirects 49 url(r'^builds/', include('readthedocs.builds.urls')), 50 # For testing the 404's with DEBUG on. 51 url(r'^404/$', handler404), 52 # For testing the 500's with DEBUG on. 
53 url(r'^500/$', handler500), 54 ] 55 56 project_urls = [ 57 url(r'^projects/', include('readthedocs.projects.urls.public')), 58 ] 59 60 api_urls = [ 61 url(r'^api/', include(v1_api.urls)), 62 url(r'^api/v2/', include('readthedocs.restapi.urls')), 63 url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), 64 url(r'^websupport/', include('readthedocs.comments.urls')), 65 ] 66 67 i18n_urls = [ 68 url(r'^i18n/', include('django.conf.urls.i18n')), 69 ] 70 71 admin_urls = [ 72 url(r'^admin/', include(admin.site.urls)), 73 ] 74 75 debug_urls = add( 76 [ 77 url('style-catalog/$', 78 TemplateView.as_view(template_name='style_catalog.html')), 79 ], 80 static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 81 ) 82 83 # Export URLs 84 groups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls, 85 deprecated_urls] 86 87 if settings.USE_PROMOS: 88 # Include donation URL's 89 groups.append([ 90 url(r'^sustainability/', include('readthedocsext.donate.urls')), 91 ]) 92 93 if 'readthedocsext.embed' in settings.INSTALLED_APPS: 94 api_urls.insert( 95 0, 96 url(r'^api/v1/embed/', include('readthedocsext.embed.urls')) 97 ) 98 99 if not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG: 100 groups.insert(0, docs_urls) 101 if getattr(settings, 'ALLOW_ADMIN', True): 102 groups.append(admin_urls) 103 if getattr(settings, 'DEBUG', False): 104 groups.append(debug_urls) 105 106 urlpatterns = reduce(add, groups) 107 [end of readthedocs/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/urls.py b/readthedocs/urls.py --- a/readthedocs/urls.py +++ b/readthedocs/urls.py @@ -34,6 +34,8 @@ url(r'^$', HomepageView.as_view(), name='homepage'), url(r'^support/', SupportView.as_view(), name='support'), url(r'^security/', TemplateView.as_view(template_name='security.html')), + url(r'^.well-known/security.txt', + TemplateView.as_view(template_name='security.txt', content_type='text/plain')), ] rtd_urls = [
{"golden_diff": "diff --git a/readthedocs/urls.py b/readthedocs/urls.py\n--- a/readthedocs/urls.py\n+++ b/readthedocs/urls.py\n@@ -34,6 +34,8 @@\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n+ url(r'^.well-known/security.txt',\n+ TemplateView.as_view(template_name='security.txt', content_type='text/plain')),\n ]\n \n rtd_urls = [\n", "issue": "Improve security contact webpage\nWe need to improve our documentation about a user who found a security issue could contact us to report this vulnerability in a confidential way.\r\n\r\nThis page should be clear regarding how to report the issue, how submit a patch (wihtout making it public) and what to do receive feedback / discuss about the solution.\r\n\r\nThere is a page already but it's too poor: https://readthedocs.org/security/\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic.base import TemplateView\nfrom tastypie.api import Api\n\nfrom readthedocs.api.base import (ProjectResource, UserResource,\n VersionResource, FileResource)\nfrom readthedocs.core.urls import docs_urls, core_urls, deprecated_urls\nfrom readthedocs.core.views import (HomepageView, SupportView,\n server_error_404, server_error_500)\nfrom readthedocs.search import views as search_views\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(ProjectResource())\nv1_api.register(VersionResource())\nv1_api.register(FileResource())\n\nadmin.autodiscover()\n\nhandler404 = server_error_404\nhandler500 = server_error_500\n\nbasic_urls = [\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n]\n\nrtd_urls = [\n url(r'^bookmarks/', include('readthedocs.bookmarks.urls')),\n url(r'^search/$', search_views.elastic_search, name='search'),\n url(r'^dashboard/', include('readthedocs.projects.urls.private')),\n url(r'^profiles/', include('readthedocs.profiles.urls.public')),\n url(r'^accounts/', include('readthedocs.profiles.urls.private')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^notifications/', include('readthedocs.notifications.urls')),\n url(r'^accounts/gold/', include('readthedocs.gold.urls')),\n # For redirects\n url(r'^builds/', include('readthedocs.builds.urls')),\n # For testing the 404's with DEBUG on.\n url(r'^404/$', handler404),\n # For testing the 500's with DEBUG on.\n url(r'^500/$', handler500),\n]\n\nproject_urls = [\n url(r'^projects/', include('readthedocs.projects.urls.public')),\n]\n\napi_urls = [\n url(r'^api/', include(v1_api.urls)),\n url(r'^api/v2/', include('readthedocs.restapi.urls')),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^websupport/', include('readthedocs.comments.urls')),\n]\n\ni18n_urls = [\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nadmin_urls = [\n url(r'^admin/', include(admin.site.urls)),\n]\n\ndebug_urls = add(\n [\n url('style-catalog/$',\n TemplateView.as_view(template_name='style_catalog.html')),\n ],\n static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n)\n\n# Export URLs\ngroups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls,\n deprecated_urls]\n\nif settings.USE_PROMOS:\n # Include donation URL's\n groups.append([\n url(r'^sustainability/', include('readthedocsext.donate.urls')),\n ])\n\nif 'readthedocsext.embed' in settings.INSTALLED_APPS:\n api_urls.insert(\n 0,\n url(r'^api/v1/embed/', include('readthedocsext.embed.urls'))\n )\n\nif not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG:\n groups.insert(0, docs_urls)\nif getattr(settings, 'ALLOW_ADMIN', True):\n groups.append(admin_urls)\nif getattr(settings, 'DEBUG', False):\n groups.append(debug_urls)\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/urls.py"}]}
1,681
129
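A quick sketch of exercising the route the golden diff adds, using Django's test client; it assumes a configured settings module plus a `security.txt` template, and the assertions are illustrative.

```python
from django.test import Client

response = Client().get("/.well-known/security.txt")
assert response.status_code == 200
# The TemplateView was registered with content_type='text/plain'
assert response["Content-Type"].startswith("text/plain")
```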
gh_patches_debug_876
rasdani/github-patches
git_diff
microsoft__Qcodes-867
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> missing dependency`jsonschema` in requirements.txt The latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. This problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing. </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 from distutils.version import StrictVersion 3 from importlib import import_module 4 import re 5 6 def get_version(verbose=1): 7 """ Extract version information from source code """ 8 9 try: 10 with open('qcodes/version.py', 'r') as f: 11 ln = f.readline() 12 # print(ln) 13 m = re.search('.* ''(.*)''', ln) 14 version = (m.group(1)).strip('\'') 15 except Exception as E: 16 print(E) 17 version = 'none' 18 if verbose: 19 print('get_version: %s' % version) 20 return version 21 22 23 def readme(): 24 with open('README.rst') as f: 25 return f.read() 26 27 extras = { 28 'MatPlot': ('matplotlib', '2.0.2'), 29 'QtPlot': ('pyqtgraph', '0.10.0'), 30 'coverage tests': ('coverage', '4.0'), 31 'Slack': ('slacker', '0.9.42') 32 } 33 extras_require = {k: '>='.join(v) for k, v in extras.items()} 34 35 setup(name='qcodes', 36 version=get_version(), 37 use_2to3=False, 38 39 maintainer='Jens H Nielsen', 40 maintainer_email='[email protected]', 41 description='Python-based data acquisition framework developed by the ' 42 'Copenhagen / Delft / Sydney / Microsoft quantum computing ' 43 'consortium', 44 long_description=readme(), 45 url='https://github.com/QCoDeS/Qcodes', 46 classifiers=[ 47 'Development Status :: 3 - Alpha', 48 'Intended Audience :: Science/Research', 49 'Programming Language :: Python :: 3 :: Only', 50 'Programming Language :: Python :: 3.5', 51 'Programming Language :: Python :: 3.6', 52 'Topic :: Scientific/Engineering' 53 ], 54 license='MIT', 55 # if we want to install without tests: 56 # packages=find_packages(exclude=["*.tests", "tests"]), 57 packages=find_packages(), 58 package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*', 59 'monitor/dist/css/*', 'config/*.json']}, 60 install_requires=[ 61 'numpy>=1.10', 62 'pyvisa>=1.8', 63 'h5py>=2.6', 64 'websockets>=3.2,<3.4' 65 ], 66 67 test_suite='qcodes.tests', 68 extras_require=extras_require, 69 70 # I think the only part of qcodes that would care about zip_safe 71 # is utils.helpers.reload_code; users of a zip-installed package 72 # shouldn't be needing to do this anyway, but we should test first. 73 zip_safe=False) 74 75 version_template = ''' 76 ***** 77 ***** package {0} must be at least version {1}. 
78 ***** Please upgrade it (pip install -U {0} or conda install {0}) 79 ***** in order to use {2} 80 ***** 81 ''' 82 83 missing_template = ''' 84 ***** 85 ***** package {0} not found 86 ***** Please install it (pip install {0} or conda install {0}) 87 ***** in order to use {1} 88 ***** 89 ''' 90 91 valueerror_template = ''' 92 ***** 93 ***** package {0} version not understood 94 ***** Please make sure the installed version ({1}) 95 ***** is compatible with the minimum required version ({2}) 96 ***** in order to use {3} 97 ***** 98 ''' 99 100 # now test the versions of extras 101 for extra, (module_name, min_version) in extras.items(): 102 try: 103 module = import_module(module_name) 104 if StrictVersion(module.__version__) < StrictVersion(min_version): 105 print(version_template.format(module_name, min_version, extra)) 106 except ImportError: 107 print(missing_template.format(module_name, extra)) 108 except ValueError: 109 print(valueerror_template.format( 110 module_name, module.__version__, min_version, extra)) 111 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,7 +61,8 @@ 'numpy>=1.10', 'pyvisa>=1.8', 'h5py>=2.6', - 'websockets>=3.2,<3.4' + 'websockets>=3.2,<3.4', + 'jsonschema' ], test_suite='qcodes.tests',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,8 @@\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n- 'websockets>=3.2,<3.4'\n+ 'websockets>=3.2,<3.4',\n+ 'jsonschema'\n ],\n \n test_suite='qcodes.tests',\n", "issue": "missing dependency`jsonschema` in requirements.txt\nThe latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. \r\n\r\nThis problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing. \n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ln = f.readline()\n # print(ln)\n m = re.search('.* ''(.*)''', ln)\n version = (m.group(1)).strip('\\'')\n except Exception as E:\n print(E)\n version = 'none'\n if verbose:\n print('get_version: %s' % version)\n return version\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nextras = {\n 'MatPlot': ('matplotlib', '2.0.2'),\n 'QtPlot': ('pyqtgraph', '0.10.0'),\n 'coverage tests': ('coverage', '4.0'),\n 'Slack': ('slacker', '0.9.42')\n}\nextras_require = {k: '>='.join(v) for k, v in extras.items()}\n\nsetup(name='qcodes',\n version=get_version(),\n use_2to3=False,\n\n maintainer='Jens H Nielsen',\n maintainer_email='[email protected]',\n description='Python-based data acquisition framework developed by the '\n 'Copenhagen / Delft / Sydney / Microsoft quantum computing '\n 'consortium',\n long_description=readme(),\n url='https://github.com/QCoDeS/Qcodes',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering'\n ],\n license='MIT',\n # if we want to install without tests:\n # packages=find_packages(exclude=[\"*.tests\", \"tests\"]),\n packages=find_packages(),\n package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*',\n 'monitor/dist/css/*', 'config/*.json']},\n install_requires=[\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n 'websockets>=3.2,<3.4'\n ],\n\n test_suite='qcodes.tests',\n extras_require=extras_require,\n\n # I think the only part of qcodes that would care about zip_safe\n # is utils.helpers.reload_code; users of a zip-installed package\n # shouldn't be needing to do this anyway, but we should test first.\n zip_safe=False)\n\nversion_template = '''\n*****\n***** package {0} must be at least version {1}.\n***** Please upgrade it (pip install -U {0} or conda install {0})\n***** in order to use {2}\n*****\n'''\n\nmissing_template = '''\n*****\n***** package {0} not found\n***** Please install it (pip install {0} or conda install {0})\n***** in order to use {1}\n*****\n'''\n\nvalueerror_template = '''\n*****\n***** package {0} version not understood\n***** Please make sure the installed version ({1})\n***** is compatible with the minimum required version ({2})\n***** in order to use {3}\n*****\n'''\n\n# now test the versions of extras\nfor extra, (module_name, min_version) in extras.items():\n try:\n module = import_module(module_name)\n if StrictVersion(module.__version__) < 
StrictVersion(min_version):\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n except ValueError:\n print(valueerror_template.format(\n module_name, module.__version__, min_version, extra))\n", "path": "setup.py"}]}
1,677
106
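A small sanity check for the failure mode in the issue, runnable after a clean `pip install qcodes`; once `jsonschema` is listed in install_requires, both imports succeed without any manual installs.

```python
# Before the patch this raised ImportError on a fresh environment, because
# importing qcodes pulled in jsonschema without declaring it as a dependency.
import jsonschema  # noqa: F401
import qcodes      # noqa: F401

print("qcodes", qcodes.__version__, "jsonschema", jsonschema.__version__)
```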
gh_patches_debug_60939
rasdani/github-patches
git_diff
Netflix__lemur-796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adding domain fails on unselectable "sensitive" Client side ![screenshot_2017-05-12_11-06-54](https://cloud.githubusercontent.com/assets/445200/25991405/528d417a-3703-11e7-9e6c-d70beb6d38e2.png) Server side ``` May 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last): May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs) May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive']) May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web May 12 09:05:48 lemur supervisord: lemur-web 'sensitive' May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last): May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs) May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive']) May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive' ``` </issue> <code> [start of lemur/domains/schemas.py] 1 """ 2 .. module: lemur.domains.schemas 3 :platform: unix 4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more 5 :license: Apache, see LICENSE for more details. 6 .. moduleauthor:: Kevin Glisson <[email protected]> 7 """ 8 from marshmallow import fields 9 from lemur.common.schema import LemurInputSchema, LemurOutputSchema 10 from lemur.schemas import AssociatedCertificateSchema 11 12 # from lemur.certificates.schemas import CertificateNestedOutputSchema 13 14 15 class DomainInputSchema(LemurInputSchema): 16 id = fields.Integer() 17 name = fields.String(required=True) 18 sensitive = fields.Boolean() 19 certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[]) 20 21 22 class DomainOutputSchema(LemurOutputSchema): 23 id = fields.Integer() 24 name = fields.String() 25 sensitive = fields.Boolean() 26 # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[]) 27 28 29 class DomainNestedOutputSchema(DomainOutputSchema): 30 __envelope__ = False 31 32 33 domain_input_schema = DomainInputSchema() 34 domain_output_schema = DomainOutputSchema() 35 domains_output_schema = DomainOutputSchema(many=True) 36 [end of lemur/domains/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py --- a/lemur/domains/schemas.py +++ b/lemur/domains/schemas.py @@ -15,7 +15,7 @@ class DomainInputSchema(LemurInputSchema): id = fields.Integer() name = fields.String(required=True) - sensitive = fields.Boolean() + sensitive = fields.Boolean(missing=False) certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
{"golden_diff": "diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py\n--- a/lemur/domains/schemas.py\n+++ b/lemur/domains/schemas.py\n@@ -15,7 +15,7 @@\n class DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n- sensitive = fields.Boolean()\n+ sensitive = fields.Boolean(missing=False)\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n", "issue": "Adding domain fails on unselectable \"sensitive\"\nClient side\r\n\r\n![screenshot_2017-05-12_11-06-54](https://cloud.githubusercontent.com/assets/445200/25991405/528d417a-3703-11e7-9e6c-d70beb6d38e2.png)\r\n\r\n\r\nServer side\r\n\r\n```\r\nMay 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web\r\nMay 12 09:05:48 lemur supervisord: lemur-web 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\n```\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.domains.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom marshmallow import fields\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.schemas import AssociatedCertificateSchema\n\n# from lemur.certificates.schemas import CertificateNestedOutputSchema\n\n\nclass DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n sensitive = fields.Boolean()\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n\n\nclass DomainOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n name = fields.String()\n sensitive = fields.Boolean()\n # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])\n\n\nclass DomainNestedOutputSchema(DomainOutputSchema):\n __envelope__ = False\n\n\ndomain_input_schema = DomainInputSchema()\ndomain_output_schema = DomainOutputSchema()\ndomains_output_schema = DomainOutputSchema(many=True)\n", "path": "lemur/domains/schemas.py"}]}
1,511
117
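A standalone sketch of the marshmallow behavior the one-line patch relies on; Lemur's own base classes are stubbed out here, and the call style follows marshmallow 3 (Lemur at the time used marshmallow 2, where `load` returns a result tuple instead).

```python
from marshmallow import Schema, fields

class DomainInputSchema(Schema):  # stand-in for Lemur's LemurInputSchema
    name = fields.String(required=True)
    sensitive = fields.Boolean(missing=False)

data = DomainInputSchema().load({"name": "example.com"})
assert data == {"name": "example.com", "sensitive": False}
# With the default in place, service.create(data['name'], data['sensitive'])
# no longer raises KeyError when the client omits "sensitive".
```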
gh_patches_debug_10372
rasdani/github-patches
git_diff
scrapy__scrapy-4170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handle it gracefully when start_url is used instead of start_urls Over the last year I’ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`. It may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening. </issue> <code> [start of scrapy/spiders/__init__.py] 1 """ 2 Base class for Scrapy spiders 3 4 See documentation in docs/topics/spiders.rst 5 """ 6 import logging 7 import warnings 8 9 from scrapy import signals 10 from scrapy.http import Request 11 from scrapy.utils.trackref import object_ref 12 from scrapy.utils.url import url_is_from_spider 13 from scrapy.exceptions import ScrapyDeprecationWarning 14 from scrapy.utils.deprecate import method_is_overridden 15 16 17 class Spider(object_ref): 18 """Base class for scrapy spiders. All spiders must inherit from this 19 class. 20 """ 21 22 name = None 23 custom_settings = None 24 25 def __init__(self, name=None, **kwargs): 26 if name is not None: 27 self.name = name 28 elif not getattr(self, 'name', None): 29 raise ValueError("%s must have a name" % type(self).__name__) 30 self.__dict__.update(kwargs) 31 if not hasattr(self, 'start_urls'): 32 self.start_urls = [] 33 34 @property 35 def logger(self): 36 logger = logging.getLogger(self.name) 37 return logging.LoggerAdapter(logger, {'spider': self}) 38 39 def log(self, message, level=logging.DEBUG, **kw): 40 """Log the given message at the given log level 41 42 This helper wraps a log call to the logger within the spider, but you 43 can use it directly (e.g. Spider.logger.info('msg')) or use any other 44 Python logger too. 45 """ 46 self.logger.log(level, message, **kw) 47 48 @classmethod 49 def from_crawler(cls, crawler, *args, **kwargs): 50 spider = cls(*args, **kwargs) 51 spider._set_crawler(crawler) 52 return spider 53 54 def _set_crawler(self, crawler): 55 self.crawler = crawler 56 self.settings = crawler.settings 57 crawler.signals.connect(self.close, signals.spider_closed) 58 59 def start_requests(self): 60 cls = self.__class__ 61 if method_is_overridden(cls, Spider, 'make_requests_from_url'): 62 warnings.warn( 63 "Spider.make_requests_from_url method is deprecated; it " 64 "won't be called in future Scrapy releases. Please " 65 "override Spider.start_requests method instead (see %s.%s)." % ( 66 cls.__module__, cls.__name__ 67 ), 68 ) 69 for url in self.start_urls: 70 yield self.make_requests_from_url(url) 71 else: 72 for url in self.start_urls: 73 yield Request(url, dont_filter=True) 74 75 def make_requests_from_url(self, url): 76 """ This method is deprecated. 
""" 77 return Request(url, dont_filter=True) 78 79 def parse(self, response): 80 raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__)) 81 82 @classmethod 83 def update_settings(cls, settings): 84 settings.setdict(cls.custom_settings or {}, priority='spider') 85 86 @classmethod 87 def handles_request(cls, request): 88 return url_is_from_spider(request.url, cls) 89 90 @staticmethod 91 def close(spider, reason): 92 closed = getattr(spider, 'closed', None) 93 if callable(closed): 94 return closed(reason) 95 96 def __str__(self): 97 return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self)) 98 99 __repr__ = __str__ 100 101 102 # Top-level imports 103 from scrapy.spiders.crawl import CrawlSpider, Rule 104 from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider 105 from scrapy.spiders.sitemap import SitemapSpider 106 [end of scrapy/spiders/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py --- a/scrapy/spiders/__init__.py +++ b/scrapy/spiders/__init__.py @@ -58,6 +58,11 @@ def start_requests(self): cls = self.__class__ + if not self.start_urls and hasattr(self, 'start_url'): + raise AttributeError( + "Crawling could not start: 'start_urls' not found " + "or empty (but found 'start_url' attribute instead, " + "did you miss an 's'?)") if method_is_overridden(cls, Spider, 'make_requests_from_url'): warnings.warn( "Spider.make_requests_from_url method is deprecated; it "
{"golden_diff": "diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py\n--- a/scrapy/spiders/__init__.py\n+++ b/scrapy/spiders/__init__.py\n@@ -58,6 +58,11 @@\n \n def start_requests(self):\n cls = self.__class__\n+ if not self.start_urls and hasattr(self, 'start_url'):\n+ raise AttributeError(\n+ \"Crawling could not start: 'start_urls' not found \"\n+ \"or empty (but found 'start_url' attribute instead, \"\n+ \"did you miss an 's'?)\")\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n", "issue": "Handle it gracefully when start_url is used instead of start_urls\nOver the last year I\u2019ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`.\r\n\r\nIt may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening.\n", "before_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\nimport logging\nimport warnings\n\nfrom scrapy import signals\nfrom scrapy.http import Request\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.deprecate import method_is_overridden\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. All spiders must inherit from this\n class.\n \"\"\"\n\n name = None\n custom_settings = None\n\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n elif not getattr(self, 'name', None):\n raise ValueError(\"%s must have a name\" % type(self).__name__)\n self.__dict__.update(kwargs)\n if not hasattr(self, 'start_urls'):\n self.start_urls = []\n\n @property\n def logger(self):\n logger = logging.getLogger(self.name)\n return logging.LoggerAdapter(logger, {'spider': self})\n\n def log(self, message, level=logging.DEBUG, **kw):\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler):\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self):\n cls = self.__class__\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n \"won't be called in future Scrapy releases. Please \"\n \"override Spider.start_requests method instead (see %s.%s).\" % (\n cls.__module__, cls.__name__\n ),\n )\n for url in self.start_urls:\n yield self.make_requests_from_url(url)\n else:\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def make_requests_from_url(self, url):\n \"\"\" This method is deprecated. 
\"\"\"\n return Request(url, dont_filter=True)\n\n def parse(self, response):\n raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))\n\n @classmethod\n def update_settings(cls, settings):\n settings.setdict(cls.custom_settings or {}, priority='spider')\n\n @classmethod\n def handles_request(cls, request):\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider, reason):\n closed = getattr(spider, 'closed', None)\n if callable(closed):\n return closed(reason)\n\n def __str__(self):\n return \"<%s %r at 0x%0x>\" % (type(self).__name__, self.name, id(self))\n\n __repr__ = __str__\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}]}
1,596
170
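A hypothetical spider showing the misspelling the patch now surfaces; the spider name and URL are placeholders.

```python
import scrapy

class ExampleSpider(scrapy.Spider):
    name = "example"
    start_url = "https://example.com"  # typo: should be start_urls = [...]

    def parse(self, response):
        pass

# Previously this spider just crawled nothing, silently. With the patch
# applied, start_requests() raises:
#   AttributeError: Crawling could not start: 'start_urls' not found
#   or empty (but found 'start_url' attribute instead, did you miss an 's'?)
```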
gh_patches_debug_866
rasdani/github-patches
git_diff
streamlit__streamlit-5184
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> It should be : https://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145 </issue> <code> [start of lib/streamlit/elements/alert.py] 1 # Copyright 2018-2022 Streamlit Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import cast, Optional, TYPE_CHECKING 16 17 from streamlit.errors import StreamlitAPIException 18 from streamlit.proto.Alert_pb2 import Alert as AlertProto 19 from streamlit.string_util import clean_text, is_emoji 20 21 if TYPE_CHECKING: 22 from streamlit.delta_generator import DeltaGenerator 23 from streamlit.type_util import SupportsStr 24 25 26 def validate_emoji(maybe_emoji: Optional[str]) -> str: 27 if maybe_emoji is None: 28 return "" 29 elif is_emoji(maybe_emoji): 30 return maybe_emoji 31 else: 32 raise StreamlitAPIException( 33 f'The value "{maybe_emoji}" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.' 34 ) 35 36 37 class AlertMixin: 38 def error( 39 self, 40 body: "SupportsStr", 41 *, # keyword-only args: 42 icon: Optional[str] = None, 43 ) -> "DeltaGenerator": 44 """Display error message. 45 46 Parameters 47 ---------- 48 icon : None 49 An optional parameter, that adds an emoji to the alert. 50 The default is None. 51 This argument can only be supplied by keyword. 52 body : str 53 The error text to display. 54 55 Example 56 ------- 57 >>> st.error('This is an error', icon="🚨") 58 59 """ 60 alert_proto = AlertProto() 61 alert_proto.icon = validate_emoji(icon) 62 alert_proto.body = clean_text(body) 63 alert_proto.format = AlertProto.ERROR 64 return self.dg._enqueue("alert", alert_proto) 65 66 def warning( 67 self, 68 body: "SupportsStr", 69 *, # keyword-only args: 70 icon: Optional[str] = None, 71 ) -> "DeltaGenerator": 72 """Display warning message. 73 74 Parameters 75 ---------- 76 icon : None 77 An optional parameter, that adds an emoji to the alert. 78 The default is None. 79 This argument can only be supplied by keyword. 80 81 body : str 82 The warning text to display. 83 84 Example 85 ------- 86 >>> st.warning('This is a warning', icon="⚠️") 87 88 """ 89 alert_proto = AlertProto() 90 alert_proto.body = clean_text(body) 91 alert_proto.icon = validate_emoji(icon) 92 alert_proto.format = AlertProto.WARNING 93 return self.dg._enqueue("alert", alert_proto) 94 95 def info( 96 self, 97 body: "SupportsStr", 98 *, # keyword-only args: 99 icon: Optional[str] = None, 100 ) -> "DeltaGenerator": 101 """Display an informational message. 102 103 Parameters 104 ---------- 105 icon : None 106 An optional parameter, that adds an emoji to the alert. 107 The default is None. 108 This argument can only be supplied by keyword. 109 110 body : str 111 The info text to display. 
112 113 Example 114 ------- 115 >>> st.info('This is a purely informational message', icon="ℹ️") 116 117 """ 118 119 alert_proto = AlertProto() 120 alert_proto.body = clean_text(body) 121 alert_proto.icon = validate_emoji(icon) 122 alert_proto.format = AlertProto.INFO 123 return self.dg._enqueue("alert", alert_proto) 124 125 def success( 126 self, 127 body: "SupportsStr", 128 *, # keyword-only args: 129 icon: Optional[str] = None, 130 ) -> "DeltaGenerator": 131 """Display a success message. 132 133 Parameters 134 ---------- 135 icon : None 136 An optional parameter, that adds an emoji to the alert. 137 The default is None. 138 This argument can only be supplied by keyword. 139 140 body : str 141 The success text to display. 142 143 Example 144 ------- 145 >>> st.success('This is a success message!', icon:"✅") 146 147 """ 148 alert_proto = AlertProto() 149 alert_proto.body = clean_text(body) 150 alert_proto.icon = validate_emoji(icon) 151 alert_proto.format = AlertProto.SUCCESS 152 return self.dg._enqueue("alert", alert_proto) 153 154 @property 155 def dg(self) -> "DeltaGenerator": 156 """Get our DeltaGenerator.""" 157 return cast("DeltaGenerator", self) 158 [end of lib/streamlit/elements/alert.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py --- a/lib/streamlit/elements/alert.py +++ b/lib/streamlit/elements/alert.py @@ -142,7 +142,7 @@ Example ------- - >>> st.success('This is a success message!', icon:"✅") + >>> st.success('This is a success message!', icon="✅") """ alert_proto = AlertProto()
{"golden_diff": "diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py\n--- a/lib/streamlit/elements/alert.py\n+++ b/lib/streamlit/elements/alert.py\n@@ -142,7 +142,7 @@\n \n Example\n -------\n- >>> st.success('This is a success message!', icon:\"\u2705\")\n+ >>> st.success('This is a success message!', icon=\"\u2705\")\n \n \"\"\"\n alert_proto = AlertProto()\n", "issue": "It should be :\nhttps://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, Optional, TYPE_CHECKING\n\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Alert_pb2 import Alert as AlertProto\nfrom streamlit.string_util import clean_text, is_emoji\n\nif TYPE_CHECKING:\n from streamlit.delta_generator import DeltaGenerator\n from streamlit.type_util import SupportsStr\n\n\ndef validate_emoji(maybe_emoji: Optional[str]) -> str:\n if maybe_emoji is None:\n return \"\"\n elif is_emoji(maybe_emoji):\n return maybe_emoji\n else:\n raise StreamlitAPIException(\n f'The value \"{maybe_emoji}\" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'\n )\n\n\nclass AlertMixin:\n def error(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display error message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n body : str\n The error text to display.\n\n Example\n -------\n >>> st.error('This is an error', icon=\"\ud83d\udea8\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.icon = validate_emoji(icon)\n alert_proto.body = clean_text(body)\n alert_proto.format = AlertProto.ERROR\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def warning(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display warning message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The warning text to display.\n\n Example\n -------\n >>> st.warning('This is a warning', icon=\"\u26a0\ufe0f\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.WARNING\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def info(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display an informational message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : 
str\n The info text to display.\n\n Example\n -------\n >>> st.info('This is a purely informational message', icon=\"\u2139\ufe0f\")\n\n \"\"\"\n\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.INFO\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def success(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display a success message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The success text to display.\n\n Example\n -------\n >>> st.success('This is a success message!', icon:\"\u2705\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.SUCCESS\n return self.dg._enqueue(\"alert\", alert_proto)\n\n @property\n def dg(self) -> \"DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/alert.py"}]}
num_tokens_prompt: 1,989
num_tokens_diff: 103
problem_id: gh_patches_debug_8773
source: rasdani/github-patches
task_type: git_diff
in_source_id: google__fuzzbench-72
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Eclipser maxfilelen value I experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago. I found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper. </issue> <code> [start of fuzzers/eclipser/fuzzer.py] 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Integration code for Eclipser fuzzer.""" 15 16 import os 17 import subprocess 18 import time 19 from multiprocessing import Process 20 21 from fuzzers import utils 22 23 24 def build(): 25 """Build fuzzer.""" 26 # QEMU does not work with sanitizers, so skip -fsanitize=. See 27 # https://github.com/SoftSec-KAIST/Eclipser/issues/5 28 utils.set_no_sanitizer_compilation_flags() 29 cflags = [ 30 '-O2', 31 '-fno-omit-frame-pointer', 32 ] 33 utils.append_flags('CFLAGS', cflags) 34 utils.append_flags('CXXFLAGS', cflags) 35 36 os.environ['CC'] = 'clang' 37 os.environ['CXX'] = 'clang++' 38 os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a' 39 40 utils.build_benchmark() 41 42 43 def fuzz(input_corpus, output_corpus, target_binary): 44 """Run fuzzer.""" 45 # Create an encoded temp corpus directory. 46 encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus), 47 'temp-corpus') 48 if not os.path.exists(encoded_temp_corpus): 49 os.mkdir(encoded_temp_corpus) 50 51 print('[run_fuzzer] Running target with Eclipser') 52 command = [ 53 'dotnet', 54 '/Eclipser/build/Eclipser.dll', 55 'fuzz', 56 '-p', 57 target_binary, 58 '-t', 59 '1048576', # FIXME: Find the max value allowed here. 60 '-o', 61 encoded_temp_corpus, 62 '--src', 63 'file', 64 '--initarg', 65 'foo', # Specifies how command line argument is passed, just a file. 66 '-f', 67 'foo', 68 '--maxfilelen', 69 str(10 * 1024 * 1024), # Increase since default is too low (8 bytes). 70 ] 71 if os.listdir(input_corpus): # Important, otherwise Eclipser crashes. 72 command += ['-i', input_corpus] 73 subprocess.Popen(command) 74 75 process = Process(target=copy_corpus_directory, 76 args=( 77 encoded_temp_corpus, 78 output_corpus, 79 )) 80 process.start() 81 82 83 def copy_corpus_directory(encoded_temp_corpus, output_corpus): 84 """Copies corpus periodically from encoded corpus directory into output 85 directory.""" 86 while True: 87 # Wait for initial fuzzer initialization, and after every copy. 88 time.sleep(120) 89 90 subprocess.call([ 91 'dotnet', 92 '/Eclipser/build/Eclipser.dll', 93 'decode', 94 '-i', 95 os.path.join(encoded_temp_corpus, 'testcase'), 96 '-o', 97 output_corpus, 98 ]) 99 [end of fuzzers/eclipser/fuzzer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py --- a/fuzzers/eclipser/fuzzer.py +++ b/fuzzers/eclipser/fuzzer.py @@ -66,7 +66,9 @@ '-f', 'foo', '--maxfilelen', - str(10 * 1024 * 1024), # Increase since default is too low (8 bytes). + # Default is too low (8 bytes), match experiment config at: + # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25 + '1048576', ] if os.listdir(input_corpus): # Important, otherwise Eclipser crashes. command += ['-i', input_corpus]
{"golden_diff": "diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py\n--- a/fuzzers/eclipser/fuzzer.py\n+++ b/fuzzers/eclipser/fuzzer.py\n@@ -66,7 +66,9 @@\n '-f',\n 'foo',\n '--maxfilelen',\n- str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n+ # Default is too low (8 bytes), match experiment config at:\n+ # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25\n+ '1048576',\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n", "issue": "Eclipser maxfilelen value\nI experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago.\r\nI found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration code for Eclipser fuzzer.\"\"\"\n\nimport os\nimport subprocess\nimport time\nfrom multiprocessing import Process\n\nfrom fuzzers import utils\n\n\ndef build():\n \"\"\"Build fuzzer.\"\"\"\n # QEMU does not work with sanitizers, so skip -fsanitize=. 
See\n # https://github.com/SoftSec-KAIST/Eclipser/issues/5\n utils.set_no_sanitizer_compilation_flags()\n cflags = [\n '-O2',\n '-fno-omit-frame-pointer',\n ]\n utils.append_flags('CFLAGS', cflags)\n utils.append_flags('CXXFLAGS', cflags)\n\n os.environ['CC'] = 'clang'\n os.environ['CXX'] = 'clang++'\n os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'\n\n utils.build_benchmark()\n\n\ndef fuzz(input_corpus, output_corpus, target_binary):\n \"\"\"Run fuzzer.\"\"\"\n # Create an encoded temp corpus directory.\n encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),\n 'temp-corpus')\n if not os.path.exists(encoded_temp_corpus):\n os.mkdir(encoded_temp_corpus)\n\n print('[run_fuzzer] Running target with Eclipser')\n command = [\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'fuzz',\n '-p',\n target_binary,\n '-t',\n '1048576', # FIXME: Find the max value allowed here.\n '-o',\n encoded_temp_corpus,\n '--src',\n 'file',\n '--initarg',\n 'foo', # Specifies how command line argument is passed, just a file.\n '-f',\n 'foo',\n '--maxfilelen',\n str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n subprocess.Popen(command)\n\n process = Process(target=copy_corpus_directory,\n args=(\n encoded_temp_corpus,\n output_corpus,\n ))\n process.start()\n\n\ndef copy_corpus_directory(encoded_temp_corpus, output_corpus):\n \"\"\"Copies corpus periodically from encoded corpus directory into output\n directory.\"\"\"\n while True:\n # Wait for initial fuzzer initialization, and after every copy.\n time.sleep(120)\n\n subprocess.call([\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'decode',\n '-i',\n os.path.join(encoded_temp_corpus, 'testcase'),\n '-o',\n output_corpus,\n ])\n", "path": "fuzzers/eclipser/fuzzer.py"}]}
num_tokens_prompt: 1,532
num_tokens_diff: 227
problem_id: gh_patches_debug_9888
source: rasdani/github-patches
task_type: git_diff
in_source_id: DDMAL__CantusDB-1415
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use Django Extensions, deprecate and remove ptvsd [Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger. Another useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use. With a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd). </issue> <code> [start of django/cantusdb_project/manage.py] 1 #!/usr/bin/env python 2 """Django's command-line utility for administrative tasks.""" 3 import os 4 import sys 5 6 7 def main(): 8 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings") 9 # start new section 10 from django.conf import settings 11 12 if settings.DEBUG: 13 if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"): 14 import ptvsd 15 16 ptvsd.enable_attach(address=("0.0.0.0", 3000)) 17 print("Attached!") 18 # end new section 19 20 try: 21 from django.core.management import execute_from_command_line 22 except ImportError as exc: 23 raise ImportError( 24 "Couldn't import Django. Are you sure it's installed and " 25 "available on your PYTHONPATH environment variable? Did you " 26 "forget to activate a virtual environment?" 27 ) from exc 28 execute_from_command_line(sys.argv) 29 30 31 if __name__ == "__main__": 32 main() 33 [end of django/cantusdb_project/manage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py --- a/django/cantusdb_project/manage.py +++ b/django/cantusdb_project/manage.py @@ -6,17 +6,6 @@ def main(): os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings") - # start new section - from django.conf import settings - - if settings.DEBUG: - if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"): - import ptvsd - - ptvsd.enable_attach(address=("0.0.0.0", 3000)) - print("Attached!") - # end new section - try: from django.core.management import execute_from_command_line except ImportError as exc:
{"golden_diff": "diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py\n--- a/django/cantusdb_project/manage.py\n+++ b/django/cantusdb_project/manage.py\n@@ -6,17 +6,6 @@\n \n def main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n- # start new section\n- from django.conf import settings\n-\n- if settings.DEBUG:\n- if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n- import ptvsd\n-\n- ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n- print(\"Attached!\")\n- # end new section\n-\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n", "issue": "Use Django Extensions, deprecate and remove ptvsd\n[Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger.\r\n\r\nAnother useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use.\r\n\r\nWith a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd). \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n # start new section\n from django.conf import settings\n\n if settings.DEBUG:\n if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n import ptvsd\n\n ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n print(\"Attached!\")\n # end new section\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "django/cantusdb_project/manage.py"}]}
num_tokens_prompt: 1,038
num_tokens_diff: 189
problem_id: gh_patches_debug_24404
source: rasdani/github-patches
task_type: git_diff
in_source_id: mdn__kuma-6974
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> T - Add 301 redirect to Subscriptions Management page `/payments/recurring/management` is moving to `/payments/management`, therefore we need to add 301 redirect in kuma/payments/urls.py See https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context </issue> <code> [start of kuma/payments/urls.py] 1 from django.urls import path 2 3 from . import views 4 5 lang_urlpatterns = [ 6 path("terms/", views.payment_terms, name="payment_terms"), 7 path("thank-you/", views.thank_you, name="thank_you"), 8 path("management/", views.payment_management, name="payment_management",), 9 path("", views.index, name="payments_index"), 10 ] 11 [end of kuma/payments/urls.py] [start of kuma/payments/views.py] 1 import logging 2 3 from django.shortcuts import render 4 from django.views.decorators.cache import never_cache 5 from waffle.decorators import waffle_flag 6 7 from kuma.users.models import User 8 9 10 log = logging.getLogger("kuma.payments.views") 11 12 13 @never_cache 14 def index(request): 15 highest_subscriber_number = User.get_highest_subscriber_number() 16 # TODO: This is never unit tested because our tests never test SSR rendering. 17 # See https://github.com/mdn/kuma/issues/6797 18 context = {"next_subscriber_number": highest_subscriber_number + 1} 19 return render(request, "payments/index.html", context) 20 21 22 @waffle_flag("subscription") 23 @never_cache 24 def thank_you(request): 25 return render(request, "payments/thank-you.html") 26 27 28 @waffle_flag("subscription") 29 @never_cache 30 def payment_terms(request): 31 return render(request, "payments/terms.html") 32 33 34 @waffle_flag("subscription") 35 @never_cache 36 def payment_management(request): 37 return render(request, "payments/management.html") 38 [end of kuma/payments/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py --- a/kuma/payments/urls.py +++ b/kuma/payments/urls.py @@ -1,10 +1,17 @@ from django.urls import path +from django.views.generic import RedirectView from . import views lang_urlpatterns = [ path("terms/", views.payment_terms, name="payment_terms"), path("thank-you/", views.thank_you, name="thank_you"), - path("management/", views.payment_management, name="payment_management",), + path( + # This is the old URL we had for a while + "recurring/management/", + RedirectView.as_view(pattern_name="payment_management", permanent=True), + name="recurring_payment_management", + ), + path("management/", views.payment_management, name="payment_management"), path("", views.index, name="payments_index"), ] diff --git a/kuma/payments/views.py b/kuma/payments/views.py --- a/kuma/payments/views.py +++ b/kuma/payments/views.py @@ -13,8 +13,6 @@ @never_cache def index(request): highest_subscriber_number = User.get_highest_subscriber_number() - # TODO: This is never unit tested because our tests never test SSR rendering. - # See https://github.com/mdn/kuma/issues/6797 context = {"next_subscriber_number": highest_subscriber_number + 1} return render(request, "payments/index.html", context)
{"golden_diff": "diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py\n--- a/kuma/payments/urls.py\n+++ b/kuma/payments/urls.py\n@@ -1,10 +1,17 @@\n from django.urls import path\n+from django.views.generic import RedirectView\n \n from . import views\n \n lang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n- path(\"management/\", views.payment_management, name=\"payment_management\",),\n+ path(\n+ # This is the old URL we had for a while\n+ \"recurring/management/\",\n+ RedirectView.as_view(pattern_name=\"payment_management\", permanent=True),\n+ name=\"recurring_payment_management\",\n+ ),\n+ path(\"management/\", views.payment_management, name=\"payment_management\"),\n path(\"\", views.index, name=\"payments_index\"),\n ]\ndiff --git a/kuma/payments/views.py b/kuma/payments/views.py\n--- a/kuma/payments/views.py\n+++ b/kuma/payments/views.py\n@@ -13,8 +13,6 @@\n @never_cache\n def index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n- # TODO: This is never unit tested because our tests never test SSR rendering.\n- # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n", "issue": "T - Add 301 redirect to Subscriptions Management page \n`/payments/recurring/management` is moving to `/payments/management`, therefore we need to add 301 redirect in kuma/payments/urls.py\r\n\r\nSee https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context \n", "before_files": [{"content": "from django.urls import path\n\nfrom . import views\n\nlang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n path(\"management/\", views.payment_management, name=\"payment_management\",),\n path(\"\", views.index, name=\"payments_index\"),\n]\n", "path": "kuma/payments/urls.py"}, {"content": "import logging\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import never_cache\nfrom waffle.decorators import waffle_flag\n\nfrom kuma.users.models import User\n\n\nlog = logging.getLogger(\"kuma.payments.views\")\n\n\n@never_cache\ndef index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n # TODO: This is never unit tested because our tests never test SSR rendering.\n # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef thank_you(request):\n return render(request, \"payments/thank-you.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_terms(request):\n return render(request, \"payments/terms.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_management(request):\n return render(request, \"payments/management.html\")\n", "path": "kuma/payments/views.py"}]}
num_tokens_prompt: 1,019
num_tokens_diff: 340
problem_id: gh_patches_debug_455
source: rasdani/github-patches
task_type: git_diff
in_source_id: openfun__marsha-2411
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🐛(backend) licence not saved during creation video resource ## Bug Report **Problematic Behavior** When we create a video and set a licence, the licence is not saved. [error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6) </issue> <code> [start of src/backend/marsha/core/forms.py] 1 """Marsha forms module.""" 2 from django.core.exceptions import ValidationError 3 from django.forms import CharField, ModelForm 4 5 from . import models 6 from .defaults import INITIALIZED 7 8 9 class DocumentForm(ModelForm): 10 """Form to create or update documents.""" 11 12 class Meta: 13 """Meta for DocumentForm.""" 14 15 model = models.Document 16 fields = ["description", "is_public", "lti_id", "playlist", "title"] 17 18 19 class VideoForm(ModelForm): 20 """Form to create or update videos.""" 21 22 upload_state = CharField( 23 max_length=20, 24 required=False, 25 ) 26 27 class Meta: 28 """Meta for VideoForm.""" 29 30 model = models.Video 31 fields = [ 32 "description", 33 "is_public", 34 "lti_id", 35 "playlist", 36 "title", 37 "upload_state", 38 ] 39 40 def clean_upload_state(self): 41 """Check upload_state valid value.""" 42 upload_state = self.cleaned_data["upload_state"] 43 44 if upload_state and upload_state != INITIALIZED: 45 raise ValidationError(f"{INITIALIZED} is the only accepted value") 46 47 return upload_state 48 [end of src/backend/marsha/core/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py --- a/src/backend/marsha/core/forms.py +++ b/src/backend/marsha/core/forms.py @@ -35,6 +35,7 @@ "playlist", "title", "upload_state", + "license", ] def clean_upload_state(self):
{"golden_diff": "diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py\n--- a/src/backend/marsha/core/forms.py\n+++ b/src/backend/marsha/core/forms.py\n@@ -35,6 +35,7 @@\n \"playlist\",\n \"title\",\n \"upload_state\",\n+ \"license\",\n ]\n \n def clean_upload_state(self):\n", "issue": "\ud83d\udc1b(backend) licence not saved during creation video resource\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nWhen we create a video and set a licence, the licence is not saved.\r\n\r\n[error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n", "path": "src/backend/marsha/core/forms.py"}]}
num_tokens_prompt: 988
num_tokens_diff: 85
problem_id: gh_patches_debug_22905
source: rasdani/github-patches
task_type: git_diff
in_source_id: streamlink__streamlink-1511
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kanal 7 does not show ## **Checklist** - [x] This is a bug report. - [ ] This is a feature request. - [ ] This is a plugin (improvement) request. - [ ] I have read the contribution guidelines. ## **Description** i cant see anything at kanal 7.com . i have test it with this links but i became black screen ## **Reproduction steps / Explicit stream URLs to test** #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8 #DESCRIPTION KANAL 7 #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle #DESCRIPTION KANAL 7 #SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1 #DESCRIPTION KANAL 7 </issue> <code> [start of src/streamlink/plugins/kanal7.py] 1 from __future__ import print_function 2 import re 3 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http 6 from streamlink.plugin.api import useragents 7 from streamlink.plugin.api import validate 8 from streamlink.stream import HLSStream 9 10 11 class Kanal7(Plugin): 12 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") 13 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') 14 stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''') 15 16 @classmethod 17 def can_handle_url(cls, url): 18 return cls.url_re.match(url) is not None 19 20 def find_iframe(self, url): 21 res = http.get(url) 22 # find iframe url 23 iframe = self.iframe_re.search(res.text) 24 iframe_url = iframe and iframe.group(1) 25 if iframe_url: 26 self.logger.debug("Found iframe: {}", iframe_url) 27 return iframe_url 28 29 def _get_streams(self): 30 iframe1 = self.find_iframe(self.url) 31 if iframe1: 32 iframe2 = self.find_iframe(iframe1) 33 if iframe2: 34 ires = http.get(iframe2) 35 stream_m = self.stream_re.search(ires.text) 36 stream_url = stream_m and stream_m.group(1) 37 if stream_url: 38 yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2}) 39 else: 40 self.logger.error("Could not find second iframe, has the page layout changed?") 41 else: 42 self.logger.error("Could not find iframe, has the page layout changed?") 43 44 45 __plugin__ = Kanal7 46 [end of src/streamlink/plugins/kanal7.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py --- a/src/streamlink/plugins/kanal7.py +++ b/src/streamlink/plugins/kanal7.py @@ -6,12 +6,13 @@ from streamlink.plugin.api import useragents from streamlink.plugin.api import validate from streamlink.stream import HLSStream +from streamlink.utils import update_scheme class Kanal7(Plugin): url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle") - iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"') - stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''') + iframe_re = re.compile(r'iframe .*?src="((?:http:)?//[^"]*?)"') + stream_re = re.compile(r'''video-source\s*=\s*['"](http[^"']*?)['"]''') @classmethod def can_handle_url(cls, url): @@ -23,6 +24,7 @@ iframe = self.iframe_re.search(res.text) iframe_url = iframe and iframe.group(1) if iframe_url: + iframe_url = update_scheme(self.url, iframe_url) self.logger.debug("Found iframe: {}", iframe_url) return iframe_url
{"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -6,12 +6,13 @@\n from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n+from streamlink.utils import update_scheme\n \n \n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n- iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n+ iframe_re = re.compile(r'iframe .*?src=\"((?:http:)?//[^\"]*?)\"')\n+ stream_re = re.compile(r'''video-source\\s*=\\s*['\"](http[^\"']*?)['\"]''')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -23,6 +24,7 @@\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n+ iframe_url = update_scheme(self.url, iframe_url)\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n", "issue": "Kanal 7 does not show\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at kanal 7.com . i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8\r\n#DESCRIPTION KANAL 7 \r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle\r\n#DESCRIPTION KANAL 7\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1\r\n#DESCRIPTION KANAL 7\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]}
num_tokens_prompt: 1,368
num_tokens_diff: 313
problem_id: gh_patches_debug_15114
source: rasdani/github-patches
task_type: git_diff
in_source_id: nextcloud__appstore-246
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Email change form Among the account pages should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a correspondig django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`. @BernhardPosselt Do you agree with the above? </issue> <code> [start of nextcloudappstore/core/user/views.py] 1 from allauth.account.views import PasswordChangeView 2 from django.contrib import messages 3 from django.contrib.auth.mixins import LoginRequiredMixin 4 from django.contrib.auth.models import User 5 from django.core.urlresolvers import reverse_lazy 6 from django.shortcuts import redirect, render 7 from django.views.generic import TemplateView 8 from django.views.generic import UpdateView 9 10 from nextcloudappstore.core.user.forms import DeleteAccountForm 11 12 13 class ChangeLanguageView(LoginRequiredMixin, TemplateView): 14 template_name = 'user/set-language.html' 15 16 def get_context_data(self, **kwargs): 17 context = super().get_context_data(**kwargs) 18 context['acc_page'] = 'account-change-language' 19 return context 20 21 22 class DeleteAccountView(LoginRequiredMixin, TemplateView): 23 template_name = 'user/delete-account.html' 24 25 def get_context_data(self, **kwargs): 26 context = super().get_context_data(**kwargs) 27 context['form'] = DeleteAccountForm() 28 context['acc_page'] = 'delete-account' 29 return context 30 31 def post(self, request, *args, **kwargs): 32 form = DeleteAccountForm(request.POST, user=request.user) 33 if form.is_valid(): 34 request.user.delete() 35 return redirect(reverse_lazy('home')) 36 else: 37 return render(request, self.template_name, {'form': form}) 38 39 40 class AccountView(LoginRequiredMixin, UpdateView): 41 """Display and allow changing of the user's name.""" 42 43 template_name = 'user/account.html' 44 template_name_suffix = '' 45 model = User 46 fields = ['first_name', 'last_name'] 47 success_url = reverse_lazy('user:account') 48 49 def get_context_data(self, **kwargs): 50 context = super().get_context_data(**kwargs) 51 context['acc_page'] = 'account' 52 return context 53 54 def form_valid(self, form): 55 messages.success(self.request, 'Name saved.') 56 return super().form_valid(form) 57 58 def get_object(self, queryset=None): 59 return self.request.user 60 61 62 class PasswordView(LoginRequiredMixin, PasswordChangeView): 63 """Allow the user to change their password.""" 64 65 template_name = 'user/password.html' 66 success_url = reverse_lazy('user:account-password') 67 68 def get_context_data(self, **kwargs): 69 context = super().get_context_data(**kwargs) 70 context['acc_page'] = 'password' 71 return context 72 73 74 class APITokenView(LoginRequiredMixin, TemplateView): 75 """Display the user's API token, and allow it to be regenerated.""" 76 77 template_name = 'user/api-token.html' 78 79 def get_context_data(self, **kwargs): 80 context = super().get_context_data(**kwargs) 81 context['acc_page'] = 'api-token' 82 return context 83 [end of nextcloudappstore/core/user/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py --- a/nextcloudappstore/core/user/views.py +++ b/nextcloudappstore/core/user/views.py @@ -43,7 +43,7 @@ template_name = 'user/account.html' template_name_suffix = '' model = User - fields = ['first_name', 'last_name'] + fields = ['first_name', 'last_name', 'email'] success_url = reverse_lazy('user:account') def get_context_data(self, **kwargs): @@ -52,7 +52,7 @@ return context def form_valid(self, form): - messages.success(self.request, 'Name saved.') + messages.success(self.request, 'Account details saved.') return super().form_valid(form) def get_object(self, queryset=None):
{"golden_diff": "diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py\n--- a/nextcloudappstore/core/user/views.py\n+++ b/nextcloudappstore/core/user/views.py\n@@ -43,7 +43,7 @@\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n- fields = ['first_name', 'last_name']\n+ fields = ['first_name', 'last_name', 'email']\n success_url = reverse_lazy('user:account')\n \n def get_context_data(self, **kwargs):\n@@ -52,7 +52,7 @@\n return context\n \n def form_valid(self, form):\n- messages.success(self.request, 'Name saved.')\n+ messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n \n def get_object(self, queryset=None):\n", "issue": "Email change form\nAmong the account pages should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a correspondig django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`.\n\n@BernhardPosselt Do you agree with the above?\n\n", "before_files": [{"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.user.forms import DeleteAccountForm\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n messages.success(self.request, 'Name saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 
'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}]}
num_tokens_prompt: 1,396
num_tokens_diff: 195
problem_id: gh_patches_debug_53934
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-3570
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ImageURL example in reference guide is broken The example doesn't render an image, just a gridded, empty plot. http://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL </issue> <code> [start of examples/glyphs/image_url.py] 1 2 import numpy as np 3 4 from bokeh.util.browser import view 5 from bokeh.document import Document 6 from bokeh.embed import file_html 7 from bokeh.models.glyphs import ImageURL 8 from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid 9 from bokeh.resources import INLINE 10 11 url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png" 12 N = 5 13 14 source = ColumnDataSource(dict( 15 url = [url]*N, 16 x1 = np.linspace( 0, 150, N), 17 y1 = np.linspace( 0, 150, N), 18 w1 = np.linspace( 10, 50, N), 19 h1 = np.linspace( 10, 50, N), 20 x2 = np.linspace(-50, 150, N), 21 y2 = np.linspace( 0, 200, N), 22 )) 23 24 xdr = Range1d(start=-100, end=200) 25 ydr = Range1d(start=-100, end=200) 26 27 plot = Plot(title="ImageURL", x_range=xdr, y_range=ydr) 28 29 image1 = ImageURL(url="url", x="x1", y="y1", w="w1", h="h1", anchor="center", global_alpha=0.2) 30 plot.add_glyph(source, image1) 31 32 image2 = ImageURL(url="url", x="x2", y="y2", w=20, h=20, anchor="top_left") 33 plot.add_glyph(source, image2) 34 35 image3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor="bottom_right") 36 plot.add_glyph(source, image3) 37 38 xaxis = LinearAxis() 39 plot.add_layout(xaxis, 'below') 40 41 yaxis = LinearAxis() 42 plot.add_layout(yaxis,'left') 43 44 plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker)) 45 plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker)) 46 47 doc = Document( ) 48 doc.add_root(plot) 49 50 if __name__ == "__main__": 51 filename = "image_url.html" 52 with open(filename, "w") as f: 53 f.write(file_html(doc, INLINE, "Image URL Example")) 54 print("Wrote %s" % filename) 55 view(filename) 56 [end of examples/glyphs/image_url.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py --- a/examples/glyphs/image_url.py +++ b/examples/glyphs/image_url.py @@ -8,7 +8,7 @@ from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid from bokeh.resources import INLINE -url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png" +url = "http://bokeh.pydata.org/en/latest/_static/images/logo.png" N = 5 source = ColumnDataSource(dict(
{"golden_diff": "diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py\n--- a/examples/glyphs/image_url.py\n+++ b/examples/glyphs/image_url.py\n@@ -8,7 +8,7 @@\n from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\n from bokeh.resources import INLINE\n \n-url = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\n+url = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\n N = 5\n \n source = ColumnDataSource(dict(\n", "issue": "ImageURL example in reference guide is broken\nThe example doesn't render an image, just a gridded, empty plot.\n\nhttp://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(title=\"ImageURL\", x_range=xdr, y_range=ydr)\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n", "path": "examples/glyphs/image_url.py"}]}
num_tokens_prompt: 1,231
num_tokens_diff: 127
problem_id: gh_patches_debug_41745
source: rasdani/github-patches
task_type: git_diff
in_source_id: sql-machine-learning__elasticdl-1051
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Modify elasticdl.layers.Embedding arguments and constructor according to design doc According to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding): ``` __init__( output_dim, embeddings_initializer='uniform', mask_zero=False, input_length=None, combiner=None, ) ``` </issue> <code> [start of elasticdl/python/elasticdl/layers/embedding.py] 1 import tensorflow as tf 2 from tensorflow.python.keras.utils import tf_utils 3 4 5 class Embedding(tf.keras.layers.Layer): 6 """ 7 Input: indexes for the embedding entries 8 shape is (batch_size, input_length) 9 Output: Corresponding embedding vectors of the input indexes 10 shape is (batch_size, input_length, embedding_dim) 11 Arguments: 12 embedding_dim: the dimension of the embedding vector 13 embedding_initializer: Initializer for embedding table 14 """ 15 16 def __init__(self, embedding_dim, embedding_initializer="uniform"): 17 super(Embedding, self).__init__() 18 self.embedding_dim = embedding_dim 19 self.embedding_initializer = embedding_initializer 20 self.tape = None 21 self.worker = None 22 self.bet_ids_pair = [] 23 24 @tf_utils.shape_type_conversion 25 def compute_output_shape(self, input_shape): 26 return input_shape + (self.embedding_dim,) 27 28 @property 29 def name(self): 30 return self._name 31 32 @staticmethod 33 def get_key(name_list): 34 return "-".join(map(str, name_list)) 35 36 def lookup_embedding(self, unique_ids): 37 batch_embedding = self.worker.embedding_lookup( 38 unique_ids, self._name, self.embedding_initializer 39 ) 40 return batch_embedding 41 42 def call(self, input): 43 ids = tf.convert_to_tensor(input, name="embedding_ids") 44 flat_ids = tf.reshape(ids, [-1]) 45 unique_ids, idx = tf.unique(flat_ids) 46 batch_embedding_tensor = tf.py_function( 47 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32 48 ) 49 if self.tape: 50 # tape.watch works with eager mode only 51 if not tf.executing_eagerly(): 52 raise RuntimeError("tape.watch only works with eager mode") 53 self.tape.watch(batch_embedding_tensor) 54 self.bet_ids_pair.append((batch_embedding_tensor, unique_ids)) 55 outputs = tf.gather(batch_embedding_tensor, idx) 56 outputs = tf.reshape( 57 outputs, ids.get_shape().concatenate(self.embedding_dim) 58 ) 59 return outputs 60 61 def reset(self): 62 self.bet_ids_pair = [] 63 self.tape = None 64 65 def set_tape(self, tape): 66 self.tape = tape 67 68 def set_worker(self, worker): 69 self.worker = worker 70 [end of elasticdl/python/elasticdl/layers/embedding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py --- a/elasticdl/python/elasticdl/layers/embedding.py +++ b/elasticdl/python/elasticdl/layers/embedding.py @@ -6,24 +6,77 @@ """ Input: indexes for the embedding entries shape is (batch_size, input_length) - Output: Corresponding embedding vectors of the input indexes - shape is (batch_size, input_length, embedding_dim) + Output: + corresponding (combined) embeddings with a shape of + (batch_size, input_length, output_dim) if combiner is None + (batch_size, output_dim) if combiner is not None Arguments: - embedding_dim: the dimension of the embedding vector + output_dim: the dimension of the embedding vector embedding_initializer: Initializer for embedding table + mask_zero: Whether or not the input value 0 is a special "padding" + value that should be masked out. + input_length: Length of input sequences, when it is constant. + This argument is required if you are going to connect + `Flatten` then `Dense` layers upstream + (without it, the shape of the dense outputs cannot be computed). + combiner: A string specifying the reduction op or None if not used. + "mean", "sqrtn" and "sum" are supported for the reduction op. + TODO: support mask_zero + TODO: support combiner + TODO: support sparse input """ - def __init__(self, embedding_dim, embedding_initializer="uniform"): - super(Embedding, self).__init__() - self.embedding_dim = embedding_dim + def __init__( + self, + output_dim, + embedding_initializer="uniform", + mask_zero=False, + input_length=None, + combiner=None, + **kwargs + ): + if "input_shape" not in kwargs and input_length: + kwargs["input_shape"] = (input_length,) + super(Embedding, self).__init__(**kwargs) + + self.output_dim = output_dim self.embedding_initializer = embedding_initializer + self.mask_zero = mask_zero + self.input_length = input_length + self.combiner = combiner self.tape = None self.worker = None self.bet_ids_pair = [] @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): - return input_shape + (self.embedding_dim,) + # this function is taken from + # tf.keras.layers.Embedding.compute_output_shape + # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156 + if self.input_length is None: + return input_shape + (self.output_dim,) + else: + if isinstance(self.input_length, (list, tuple)): + in_lens = list(self.input_length) + else: + in_lens = [self.input_length] + if len(in_lens) != len(input_shape) - 1: + raise ValueError( + '"input_length" is %s, ' + "but received input has shape %s" + % (str(self.input_length), str(input_shape)) + ) + else: + for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])): + if s1 is not None and s2 is not None and s1 != s2: + raise ValueError( + '"input_length" is %s, ' + "but received input has shape %s" + % (str(self.input_length), str(input_shape)) + ) + elif s1 is None: + in_lens[i] = s2 + return (input_shape[0],) + tuple(in_lens) + (self.output_dim,) @property def name(self): @@ -54,7 +107,7 @@ self.bet_ids_pair.append((batch_embedding_tensor, unique_ids)) outputs = tf.gather(batch_embedding_tensor, idx) outputs = tf.reshape( - outputs, ids.get_shape().concatenate(self.embedding_dim) + outputs, ids.get_shape().concatenate(self.output_dim) ) return outputs
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py\n--- a/elasticdl/python/elasticdl/layers/embedding.py\n+++ b/elasticdl/python/elasticdl/layers/embedding.py\n@@ -6,24 +6,77 @@\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n- Output: Corresponding embedding vectors of the input indexes\n- shape is (batch_size, input_length, embedding_dim)\n+ Output:\n+ corresponding (combined) embeddings with a shape of\n+ (batch_size, input_length, output_dim) if combiner is None\n+ (batch_size, output_dim) if combiner is not None\n Arguments:\n- embedding_dim: the dimension of the embedding vector\n+ output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n+ mask_zero: Whether or not the input value 0 is a special \"padding\"\n+ value that should be masked out.\n+ input_length: Length of input sequences, when it is constant.\n+ This argument is required if you are going to connect\n+ `Flatten` then `Dense` layers upstream\n+ (without it, the shape of the dense outputs cannot be computed).\n+ combiner: A string specifying the reduction op or None if not used.\n+ \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n+ TODO: support mask_zero\n+ TODO: support combiner\n+ TODO: support sparse input\n \"\"\"\n \n- def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n- super(Embedding, self).__init__()\n- self.embedding_dim = embedding_dim\n+ def __init__(\n+ self,\n+ output_dim,\n+ embedding_initializer=\"uniform\",\n+ mask_zero=False,\n+ input_length=None,\n+ combiner=None,\n+ **kwargs\n+ ):\n+ if \"input_shape\" not in kwargs and input_length:\n+ kwargs[\"input_shape\"] = (input_length,)\n+ super(Embedding, self).__init__(**kwargs)\n+\n+ self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n+ self.mask_zero = mask_zero\n+ self.input_length = input_length\n+ self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n \n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n- return input_shape + (self.embedding_dim,)\n+ # this function is taken from\n+ # tf.keras.layers.Embedding.compute_output_shape\n+ # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n+ if self.input_length is None:\n+ return input_shape + (self.output_dim,)\n+ else:\n+ if isinstance(self.input_length, (list, tuple)):\n+ in_lens = list(self.input_length)\n+ else:\n+ in_lens = [self.input_length]\n+ if len(in_lens) != len(input_shape) - 1:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ else:\n+ for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n+ if s1 is not None and s2 is not None and s1 != s2:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ elif s1 is None:\n+ in_lens[i] = s2\n+ return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n \n @property\n def name(self):\n@@ -54,7 +107,7 @@\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n- outputs, ids.get_shape().concatenate(self.embedding_dim)\n+ outputs, ids.get_shape().concatenate(self.output_dim)\n )\n return 
outputs\n", "issue": "Modify elasticdl.layers.Embedding arguments and constructor according to design doc\nAccording to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding):\r\n\r\n```\r\n__init__(\r\n output_dim,\r\n embeddings_initializer='uniform',\r\n mask_zero=False,\r\n input_length=None,\r\n combiner=None,\r\n)\r\n```\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n Output: Corresponding embedding vectors of the input indexes\n shape is (batch_size, input_length, embedding_dim)\n Arguments:\n embedding_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n \"\"\"\n\n def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n super(Embedding, self).__init__()\n self.embedding_dim = embedding_dim\n self.embedding_initializer = embedding_initializer\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape + (self.embedding_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.embedding_lookup(\n unique_ids, self._name, self.embedding_initializer\n )\n return batch_embedding\n\n def call(self, input):\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.embedding_dim)\n )\n return outputs\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}]}
1,255
1,009
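Note: the `compute_output_shape` logic in the golden diff above is fairly dense. The following is a minimal, dependency-free sketch of the same shape rule as a plain function; `embedding_output_shape` is a hypothetical name for illustration, not elasticdl's API.

```python
# Shape rule from the diff: with no input_length, just append output_dim;
# otherwise each declared length must agree with (or fill in) the input dims.
def embedding_output_shape(input_shape, output_dim, input_length=None):
    if input_length is None:
        return input_shape + (output_dim,)
    in_lens = list(input_length) if isinstance(input_length, (list, tuple)) else [input_length]
    if len(in_lens) != len(input_shape) - 1:
        raise ValueError("input_length %r does not match shape %r" % (input_length, input_shape))
    for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
        if s1 is not None and s2 is not None and s1 != s2:
            raise ValueError("input_length %r does not match shape %r" % (input_length, input_shape))
        if s1 is None:
            in_lens[i] = s2  # fill unknown declared length from the actual input
    return (input_shape[0],) + tuple(in_lens) + (output_dim,)

assert embedding_output_shape((32, 10), 64) == (32, 10, 64)
assert embedding_output_shape((32, 10), 64, input_length=10) == (32, 10, 64)
```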
gh_patches_debug_28888
rasdani/github-patches
git_diff
chainer__chainer-1376
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GPU implementation of transpose_sequence is too slow As @jnory mentioned in #1174, `transpose_sequence` is too slow. We need to make a single kernel to make a transposed matrix. </issue> <code> [start of chainer/functions/array/transpose_sequence.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 8 def _transpose(xs, length): 9 xp = cuda.get_array_module(*xs) 10 lengths = numpy.zeros(length, dtype='i') 11 for i, x in enumerate(xs): 12 lengths[0:len(x)] = i + 1 13 dtype = xs[0].dtype 14 unit = xs[0].shape[1:] 15 outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) 16 17 for i, x in enumerate(xs): 18 for p, xi in enumerate(x): 19 outs[p][i] = xi 20 21 return outs 22 23 24 class TransposeSequence(function.Function): 25 26 """Function that transposes a list of Variables.""" 27 28 def check_type_forward(self, xs_type): 29 for p, n in zip(xs_type, xs_type[1:]): 30 type_check.expect( 31 p.shape[0] >= n.shape[0], 32 p.shape[1:] == n.shape[1:], 33 ) 34 35 def forward(self, xs): 36 if len(xs) == 0: 37 return () 38 return _transpose(xs, len(xs[0])) 39 40 def backward(self, xs, gs): 41 return _transpose(gs, len(xs)) 42 43 44 def transpose_sequence(xs): 45 """Transpose a list of Variables. 46 47 This function transposes a list of :class:`~chainer.Variable` s and returns 48 a list of :class:`Variable` s. 49 For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function 50 returns ``[(0, 4, 6), (1, 5), (2), (3)]``. 51 Note that a given list needs to be sorted by each length of 52 :class:`~chainer.Variable`. 53 54 Args: 55 xs (list of ~chainer.Variable): Variables to transpose. 56 57 Returns: 58 tuple or Variable: Transposed list. 59 """ 60 ys = TransposeSequence()(*xs) 61 if not isinstance(ys, tuple): 62 ys = (ys,) 63 return ys 64 [end of chainer/functions/array/transpose_sequence.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py --- a/chainer/functions/array/transpose_sequence.py +++ b/chainer/functions/array/transpose_sequence.py @@ -6,17 +6,55 @@ def _transpose(xs, length): - xp = cuda.get_array_module(*xs) - lengths = numpy.zeros(length, dtype='i') - for i, x in enumerate(xs): - lengths[0:len(x)] = i + 1 - dtype = xs[0].dtype - unit = xs[0].shape[1:] - outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) + if length == 0: + return () + xp = cuda.get_array_module(*xs) + lengths = numpy.empty(length, dtype='i') + end = length for i, x in enumerate(xs): - for p, xi in enumerate(x): - outs[p][i] = xi + lengths[len(x):end] = i + end = len(x) + lengths[0:end] = len(xs) + + if xp is numpy: + dtype = xs[0].dtype + unit = xs[0].shape[1:] + + outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths]) + for i, x in enumerate(xs): + for p, xi in enumerate(x): + outs[p][i] = xi + + else: + offsets1 = numpy.empty(len(xs) + 1, dtype='i') + offsets1[0] = 0 + numpy.cumsum([len(x) for x in xs], out=offsets1[1:]) + + offsets2 = numpy.empty(length + 1, dtype='i') + offsets2[0] = 0 + numpy.cumsum(lengths, dtype='i', out=offsets2[1:]) + + x = xp.concatenate(xs, axis=0) + o = xp.empty_like(x) + unit = xs[0].size // len(xs[0]) + size = length * len(xs) * unit + cuda.elementwise( + 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs', + 'raw T hs', + ''' + int ind = i / unit; + int off = i - ind * unit; + int y = ind / len; + int x = ind - y * len; + if (off2[x] + y < off2[x + 1]) { + hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off]; + } + ''', + 'transpose_sequence' + )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o, + size=size) + outs = tuple(xp.split(o, offsets2[1:-1])) return outs
{"golden_diff": "diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py\n--- a/chainer/functions/array/transpose_sequence.py\n+++ b/chainer/functions/array/transpose_sequence.py\n@@ -6,17 +6,55 @@\n \n \n def _transpose(xs, length):\n- xp = cuda.get_array_module(*xs)\n- lengths = numpy.zeros(length, dtype='i')\n- for i, x in enumerate(xs):\n- lengths[0:len(x)] = i + 1\n- dtype = xs[0].dtype\n- unit = xs[0].shape[1:]\n- outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ if length == 0:\n+ return ()\n \n+ xp = cuda.get_array_module(*xs)\n+ lengths = numpy.empty(length, dtype='i')\n+ end = length\n for i, x in enumerate(xs):\n- for p, xi in enumerate(x):\n- outs[p][i] = xi\n+ lengths[len(x):end] = i\n+ end = len(x)\n+ lengths[0:end] = len(xs)\n+\n+ if xp is numpy:\n+ dtype = xs[0].dtype\n+ unit = xs[0].shape[1:]\n+\n+ outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ for i, x in enumerate(xs):\n+ for p, xi in enumerate(x):\n+ outs[p][i] = xi\n+\n+ else:\n+ offsets1 = numpy.empty(len(xs) + 1, dtype='i')\n+ offsets1[0] = 0\n+ numpy.cumsum([len(x) for x in xs], out=offsets1[1:])\n+\n+ offsets2 = numpy.empty(length + 1, dtype='i')\n+ offsets2[0] = 0\n+ numpy.cumsum(lengths, dtype='i', out=offsets2[1:])\n+\n+ x = xp.concatenate(xs, axis=0)\n+ o = xp.empty_like(x)\n+ unit = xs[0].size // len(xs[0])\n+ size = length * len(xs) * unit\n+ cuda.elementwise(\n+ 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',\n+ 'raw T hs',\n+ '''\n+ int ind = i / unit;\n+ int off = i - ind * unit;\n+ int y = ind / len;\n+ int x = ind - y * len;\n+ if (off2[x] + y < off2[x + 1]) {\n+ hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];\n+ }\n+ ''',\n+ 'transpose_sequence'\n+ )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,\n+ size=size)\n+ outs = tuple(xp.split(o, offsets2[1:-1]))\n \n return outs\n", "issue": "GPU implementation of transpose_sequence is too slow\nAs @jnory mentioned in #1174, `transpose_sequence` is too slow. 
We need to make a single kernel to make a transposed matrix.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _transpose(xs, length):\n xp = cuda.get_array_module(*xs)\n lengths = numpy.zeros(length, dtype='i')\n for i, x in enumerate(xs):\n lengths[0:len(x)] = i + 1\n dtype = xs[0].dtype\n unit = xs[0].shape[1:]\n outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n\n for i, x in enumerate(xs):\n for p, xi in enumerate(x):\n outs[p][i] = xi\n\n return outs\n\n\nclass TransposeSequence(function.Function):\n\n \"\"\"Function that transposes a list of Variables.\"\"\"\n\n def check_type_forward(self, xs_type):\n for p, n in zip(xs_type, xs_type[1:]):\n type_check.expect(\n p.shape[0] >= n.shape[0],\n p.shape[1:] == n.shape[1:],\n )\n\n def forward(self, xs):\n if len(xs) == 0:\n return ()\n return _transpose(xs, len(xs[0]))\n\n def backward(self, xs, gs):\n return _transpose(gs, len(xs))\n\n\ndef transpose_sequence(xs):\n \"\"\"Transpose a list of Variables.\n\n This function transposes a list of :class:`~chainer.Variable` s and returns\n a list of :class:`Variable` s.\n For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function\n returns ``[(0, 4, 6), (1, 5), (2), (3)]``.\n Note that a given list needs to be sorted by each length of\n :class:`~chainer.Variable`.\n\n Args:\n xs (list of ~chainer.Variable): Variables to transpose.\n\n Returns:\n tuple or Variable: Transposed list.\n \"\"\"\n ys = TransposeSequence()(*xs)\n if not isinstance(ys, tuple):\n ys = (ys,)\n return ys\n", "path": "chainer/functions/array/transpose_sequence.py"}]}
1,175
696
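Note: the CUDA elementwise kernel in the golden diff above boils down to index arithmetic over two prefix-sum offset tables. Below is a plain NumPy sketch of the same mapping on the CPU, assuming (as the function's docstring requires) that the sequences are sorted by decreasing length; variable names follow the kernel's off1/off2 convention but the helper itself is illustrative, not Chainer's API.

```python
import numpy as np

# xs: ragged, length-sorted sequences; the transpose gathers element p of
# every sequence that is long enough into output row p.
xs = [np.array([0, 1, 2, 3]), np.array([4, 5]), np.array([6])]
length = len(xs[0])
lengths = np.array([sum(len(x) > p for x in xs) for p in range(length)])  # sequences per position
off1 = np.concatenate(([0], np.cumsum([len(x) for x in xs])))  # row starts in flattened input
off2 = np.concatenate(([0], np.cumsum(lengths)))               # row starts in flattened output
v = np.concatenate(xs)
o = np.empty_like(v)
for x in range(length):                      # output row index (position in sequence)
    for y in range(off2[x + 1] - off2[x]):   # only sequences long enough contribute
        o[off2[x] + y] = v[off1[y] + x]      # the single gather the kernel performs per element
outs = np.split(o, off2[1:-1])
print(outs)  # [array([0, 4, 6]), array([1, 5]), array([2]), array([3])]
```

On the GPU, the diff fuses the two loops into one flat index `i`, which is why the whole transpose becomes a single kernel launch instead of a Python loop of copies.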
gh_patches_debug_622
rasdani/github-patches
git_diff
pex-tool__pex-1859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.100 On the docket: + [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.99" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.99" +__version__ = "2.1.100"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.99\"\n+__version__ = \"2.1.100\"\n", "issue": "Release 2.1.100\nOn the docket:\r\n+ [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.99\"\n", "path": "pex/version.py"}]}
628
98
gh_patches_debug_16027
rasdani/github-patches
git_diff
SigmaHQ__sigma-1278
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update sigma2attack to the latest navigator version the data generated for navigator is not up to date. ![Clipboard - 5 novembre 2020 15_38](https://user-images.githubusercontent.com/1626464/98255016-3675e080-1f7d-11eb-8490-321a7a053c04.png) specs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md) </issue> <code> [start of tools/sigma/sigma2attack.py] 1 #!/usr/bin/env python3 2 3 import argparse 4 import glob 5 import json 6 import os 7 import sys 8 9 import yaml 10 11 def main(): 12 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 13 parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from") 14 parser.add_argument("--out-file", "-o", dest="out_file", default="heatmap.json", help="File to write the JSON layer to") 15 parser.add_argument("--no-comment", dest="no_comment", action="store_true", help="Don't store rule names in comments") 16 args = parser.parse_args() 17 18 rule_files = glob.glob(os.path.join(args.rules_dir, "**/*.yml"), recursive=True) 19 techniques_to_rules = {} 20 curr_max_technique_count = 0 21 num_rules_used = 0 22 for rule_file in rule_files: 23 try: 24 rule = yaml.safe_load(open(rule_file).read()) 25 except yaml.YAMLError: 26 sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") 27 continue 28 if "tags" not in rule: 29 sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n") 30 continue 31 tags = rule["tags"] 32 for tag in tags: 33 if tag.lower().startswith("attack.t"): 34 technique_id = tag[len("attack."):].upper() 35 num_rules_used += 1 36 if technique_id not in techniques_to_rules: 37 techniques_to_rules[technique_id] = [] 38 techniques_to_rules[technique_id].append(os.path.basename(rule_file)) 39 curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id])) 40 41 42 scores = [] 43 for technique in techniques_to_rules: 44 entry = { 45 "techniqueID": technique, 46 "score": len(techniques_to_rules[technique]), 47 } 48 if not args.no_comment: 49 entry["comment"] = "\n".join(techniques_to_rules[technique]) 50 51 scores.append(entry) 52 53 output = { 54 "domain": "mitre-enterprise", 55 "name": "Sigma rules heatmap", 56 "gradient": { 57 "colors": [ 58 "#ffffff", 59 "#ff6666" 60 ], 61 "maxValue": curr_max_technique_count, 62 "minValue": 0 63 }, 64 "version": "2.2", 65 "techniques": scores, 66 } 67 68 with open(args.out_file, "w") as f: 69 f.write(json.dumps(output)) 70 print("[*] Layer file written in " + args.out_file + " (" + str(num_rules_used) + " rules)") 71 72 if __name__ == "__main__": 73 main() 74 [end of tools/sigma/sigma2attack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py --- a/tools/sigma/sigma2attack.py +++ b/tools/sigma/sigma2attack.py @@ -21,7 +21,7 @@ num_rules_used = 0 for rule_file in rule_files: try: - rule = yaml.safe_load(open(rule_file).read()) + rule = yaml.safe_load(open(rule_file, encoding="utf-8").read()) except yaml.YAMLError: sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n") continue @@ -61,7 +61,10 @@ "maxValue": curr_max_technique_count, "minValue": 0 }, - "version": "2.2", + "versions": { + "navigator": "4.0", + "layer": "4.0" + }, "techniques": scores, }
{"golden_diff": "diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py\n--- a/tools/sigma/sigma2attack.py\n+++ b/tools/sigma/sigma2attack.py\n@@ -21,7 +21,7 @@\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n- rule = yaml.safe_load(open(rule_file).read())\n+ rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n@@ -61,7 +61,10 @@\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n- \"version\": \"2.2\", \n+ \"versions\": {\n+ \"navigator\": \"4.0\",\n+ \"layer\": \"4.0\"\n+ },\n \"techniques\": scores,\n }\n", "issue": "Update sigma2attack to the latest navigator version\nthe data generated for navigator is not up to date.\r\n\r\n![Clipboard - 5 novembre 2020 15_38](https://user-images.githubusercontent.com/1626464/98255016-3675e080-1f7d-11eb-8490-321a7a053c04.png)\r\n\r\nspecs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file).read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"version\": \"2.2\", \n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}]}
1,420
222
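Note: the fix above replaces the scalar `"version"` field with the ATT&CK Navigator v4 `"versions"` object. A minimal sketch of a conforming layer skeleton follows; the technique ID and score values are placeholders for illustration, not taken from the record.

```python
import json

# Minimal Navigator v4 layer matching the fixed output shape; in practice the
# gradient bounds and the techniques list are filled in from the rule counts.
layer = {
    "domain": "mitre-enterprise",
    "name": "Sigma rules heatmap",
    "versions": {"navigator": "4.0", "layer": "4.0"},
    "gradient": {"colors": ["#ffffff", "#ff6666"], "maxValue": 1, "minValue": 0},
    "techniques": [{"techniqueID": "T1059", "score": 1}],
}
print(json.dumps(layer, indent=2))
```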
gh_patches_debug_21522
rasdani/github-patches
git_diff
svthalia__concrexit-3070
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expand/improve document part of event endpoint <!-- Please add the appropriate label for what change should be made: docs: changes to the documentation) refactor: refactoring production code, eg. renaming a variable or rewriting a function test: adding missing tests, refactoring tests; no production code change chore: updating poetry etc; no production code change --> ### Describe the change Change the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`). ### Motivation It's very nice for the app to be able to just fetch files directly. Also, I don't think the current URL is even necessary because you also have the PK with which you can create the URL. ### Current implementation There is a URL that leads to the file, but only after a redirect to `cdn`. </issue> <code> [start of website/documents/api/v2/serializers/document.py] 1 from rest_framework.fields import SerializerMethodField 2 from rest_framework.reverse import reverse 3 4 from documents.models import Document 5 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import ( 6 CleanedModelSerializer, 7 ) 8 9 10 class DocumentSerializer(CleanedModelSerializer): 11 class Meta: 12 model = Document 13 fields = ("pk", "name", "url", "category", "members_only") 14 15 url = SerializerMethodField("_url") 16 17 def _url(self, instance): 18 return self.context["request"].build_absolute_uri( 19 reverse("documents:document", kwargs={"pk": instance.pk}) 20 ) 21 [end of website/documents/api/v2/serializers/document.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py --- a/website/documents/api/v2/serializers/document.py +++ b/website/documents/api/v2/serializers/document.py @@ -1,10 +1,10 @@ from rest_framework.fields import SerializerMethodField -from rest_framework.reverse import reverse from documents.models import Document from thaliawebsite.api.v2.serializers.cleaned_model_serializer import ( CleanedModelSerializer, ) +from utils.media.services import get_media_url class DocumentSerializer(CleanedModelSerializer): @@ -15,6 +15,12 @@ url = SerializerMethodField("_url") def _url(self, instance): - return self.context["request"].build_absolute_uri( - reverse("documents:document", kwargs={"pk": instance.pk}) - ) + if instance.members_only and ( + not self.context["request"].user.is_authenticated + or not self.context["request"].member.has_active_membership() + ): + return self.context["request"].build_absolute_uri( + instance.get_absolute_url() + ) + + return get_media_url(instance.file, absolute_url=True)
{"golden_diff": "diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py\n--- a/website/documents/api/v2/serializers/document.py\n+++ b/website/documents/api/v2/serializers/document.py\n@@ -1,10 +1,10 @@\n from rest_framework.fields import SerializerMethodField\n-from rest_framework.reverse import reverse\n \n from documents.models import Document\n from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n )\n+from utils.media.services import get_media_url\n \n \n class DocumentSerializer(CleanedModelSerializer):\n@@ -15,6 +15,12 @@\n url = SerializerMethodField(\"_url\")\n \n def _url(self, instance):\n- return self.context[\"request\"].build_absolute_uri(\n- reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n- )\n+ if instance.members_only and (\n+ not self.context[\"request\"].user.is_authenticated\n+ or not self.context[\"request\"].member.has_active_membership()\n+ ):\n+ return self.context[\"request\"].build_absolute_uri(\n+ instance.get_absolute_url()\n+ )\n+\n+ return get_media_url(instance.file, absolute_url=True)\n", "issue": "Expand/improve document part of event endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nChange the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).\r\n\r\n### Motivation\r\nIt's very nice for the app to be able to just fetch files directly.\r\nAlso, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.\r\n\r\n### Current implementation\r\nThere is a URL that leads to the file, but only after a redirect to `cdn`.\n", "before_files": [{"content": "from rest_framework.fields import SerializerMethodField\nfrom rest_framework.reverse import reverse\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n )\n", "path": "website/documents/api/v2/serializers/document.py"}]}
890
267
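Note: the serializer change above returns the direct media URL unless the document is members-only and the requester lacks an active membership, in which case it falls back to the redirecting view. A plain-Python sketch of that branch follows; `direct` and `redirect` are hypothetical stand-ins for the project's `get_media_url` helper and reverse-resolved view URL.

```python
# Access branch from the diff, stripped of Django specifics.
def document_url(doc, user, get_media_url, fallback_url):
    restricted = doc["members_only"] and not (
        user is not None and user.get("has_active_membership", False)
    )
    return fallback_url(doc) if restricted else get_media_url(doc)

direct = lambda d: "/media/documents/%s.pdf" % d["pk"]
redirect = lambda d: "/documents/document/%s/" % d["pk"]
doc = {"pk": 7, "members_only": True}
print(document_url(doc, None, direct, redirect))                              # redirect for anonymous
print(document_url(doc, {"has_active_membership": True}, direct, redirect))   # direct file URL
```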
gh_patches_debug_28907
rasdani/github-patches
git_diff
ansible__ansible-43525
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing all_content param in ovirt_hosts_facts ##### SUMMARY ovirt_hosts_facts misses the all_content param and so it's not possible to get back the whole host details. ovirt_vms_facts list, for instance, has it. ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME ovirt_hosts_facts ##### ANSIBLE VERSION ``` ansible 2.6.1 config file = /etc/ansible/ansible.cfg configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /usr/bin/ansible python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)] ``` ##### CONFIGURATION ##### OS / ENVIRONMENT N/A ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> ```yaml - ovirt_vms_facts: auth: "{{ ovirt_auth }}" all_content: true - debug: var=ovirt_vms - ovirt_hosts_facts: auth: "{{ ovirt_auth }}" all_content: true - debug: var=ovirt_hosts ``` ##### EXPECTED RESULTS a list of hosts with full detail for each of them ##### ACTUAL RESULTS ``` TASK [ovirt_hosts_facts] ****************************************************************************************************************************************************************************************** fatal: [localhost]: FAILED! => {"changed": false, "msg": "Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern"} to retry, use: --limit @/root/test.retry ``` </issue> <code> [start of lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py] 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (c) 2016 Red Hat, Inc. 5 # 6 # This file is part of Ansible 7 # 8 # Ansible is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU General Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # Ansible is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License 19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 20 # 21 22 ANSIBLE_METADATA = {'metadata_version': '1.1', 23 'status': ['preview'], 24 'supported_by': 'community'} 25 26 27 DOCUMENTATION = ''' 28 --- 29 module: ovirt_hosts_facts 30 short_description: Retrieve facts about one or more oVirt/RHV hosts 31 author: "Ondra Machacek (@machacekondra)" 32 version_added: "2.3" 33 description: 34 - "Retrieve facts about one or more oVirt/RHV hosts." 35 notes: 36 - "This module creates a new top-level C(ovirt_hosts) fact, which 37 contains a list of hosts." 38 options: 39 pattern: 40 description: 41 - "Search term which is accepted by oVirt/RHV search backend." 
42 - "For example to search host X from datacenter Y use following pattern: 43 name=X and datacenter=Y" 44 extends_documentation_fragment: ovirt_facts 45 ''' 46 47 EXAMPLES = ''' 48 # Examples don't contain auth parameter for simplicity, 49 # look at ovirt_auth module to see how to reuse authentication: 50 51 # Gather facts about all hosts which names start with C(host) and 52 # belong to data center C(west): 53 - ovirt_hosts_facts: 54 pattern: name=host* and datacenter=west 55 - debug: 56 var: ovirt_hosts 57 ''' 58 59 RETURN = ''' 60 ovirt_hosts: 61 description: "List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys, 62 all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host." 63 returned: On success. 64 type: list 65 ''' 66 67 import traceback 68 69 from ansible.module_utils.basic import AnsibleModule 70 from ansible.module_utils.ovirt import ( 71 check_sdk, 72 create_connection, 73 get_dict_of_struct, 74 ovirt_facts_full_argument_spec, 75 ) 76 77 78 def main(): 79 argument_spec = ovirt_facts_full_argument_spec( 80 pattern=dict(default='', required=False), 81 ) 82 module = AnsibleModule(argument_spec) 83 check_sdk(module) 84 85 try: 86 auth = module.params.pop('auth') 87 connection = create_connection(auth) 88 hosts_service = connection.system_service().hosts_service() 89 hosts = hosts_service.list(search=module.params['pattern']) 90 module.exit_json( 91 changed=False, 92 ansible_facts=dict( 93 ovirt_hosts=[ 94 get_dict_of_struct( 95 struct=c, 96 connection=connection, 97 fetch_nested=module.params.get('fetch_nested'), 98 attributes=module.params.get('nested_attributes'), 99 ) for c in hosts 100 ], 101 ), 102 ) 103 except Exception as e: 104 module.fail_json(msg=str(e), exception=traceback.format_exc()) 105 finally: 106 connection.close(logout=auth.get('token') is None) 107 108 109 if __name__ == '__main__': 110 main() 111 [end of lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py --- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py +++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py @@ -41,6 +41,12 @@ - "Search term which is accepted by oVirt/RHV search backend." - "For example to search host X from datacenter Y use following pattern: name=X and datacenter=Y" + all_content: + description: + - "If I(true) all the attributes of the hosts should be + included in the response." + default: False + version_added: "2.7" extends_documentation_fragment: ovirt_facts ''' @@ -78,6 +84,7 @@ def main(): argument_spec = ovirt_facts_full_argument_spec( pattern=dict(default='', required=False), + all_content=dict(default=False, type='bool'), ) module = AnsibleModule(argument_spec) check_sdk(module) @@ -86,7 +93,10 @@ auth = module.params.pop('auth') connection = create_connection(auth) hosts_service = connection.system_service().hosts_service() - hosts = hosts_service.list(search=module.params['pattern']) + hosts = hosts_service.list( + search=module.params['pattern'], + all_content=module.params['all_content'], + ) module.exit_json( changed=False, ansible_facts=dict(
{"golden_diff": "diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n--- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n@@ -41,6 +41,12 @@\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\n+ all_content:\n+ description:\n+ - \"If I(true) all the attributes of the hosts should be\n+ included in the response.\"\n+ default: False\n+ version_added: \"2.7\"\n extends_documentation_fragment: ovirt_facts\n '''\n \n@@ -78,6 +84,7 @@\n def main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n+ all_content=dict(default=False, type='bool'),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n@@ -86,7 +93,10 @@\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n- hosts = hosts_service.list(search=module.params['pattern'])\n+ hosts = hosts_service.list(\n+ search=module.params['pattern'],\n+ all_content=module.params['all_content'],\n+ )\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n", "issue": "Missing all_content param in ovirt_hosts_facts\n##### SUMMARY\r\novirt_hosts_facts misses the all_content param and so it's not possible to get back the whole host details. ovirt_vms_facts list, for instance, has it.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\novirt_hosts_facts\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.6.1\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)]\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n\r\n##### OS / ENVIRONMENT\r\nN/A\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n - ovirt_vms_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_vms\r\n - ovirt_hosts_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_hosts\r\n```\r\n\r\n##### EXPECTED RESULTS\r\na list of hosts with full detail for each of them\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nTASK [ovirt_hosts_facts] ******************************************************************************************************************************************************************************************\r\nfatal: [localhost]: FAILED! 
=> {\"changed\": false, \"msg\": \"Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern\"}\r\n\tto retry, use: --limit @/root/test.retry\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_hosts_facts\nshort_description: Retrieve facts about one or more oVirt/RHV hosts\nauthor: \"Ondra Machacek (@machacekondra)\"\nversion_added: \"2.3\"\ndescription:\n - \"Retrieve facts about one or more oVirt/RHV hosts.\"\nnotes:\n - \"This module creates a new top-level C(ovirt_hosts) fact, which\n contains a list of hosts.\"\noptions:\n pattern:\n description:\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\nextends_documentation_fragment: ovirt_facts\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Gather facts about all hosts which names start with C(host) and\n# belong to data center C(west):\n- ovirt_hosts_facts:\n pattern: name=host* and datacenter=west\n- debug:\n var: ovirt_hosts\n'''\n\nRETURN = '''\novirt_hosts:\n description: \"List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys,\n all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host.\"\n returned: On success.\n type: list\n'''\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n check_sdk,\n create_connection,\n get_dict_of_struct,\n ovirt_facts_full_argument_spec,\n)\n\n\ndef main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n hosts = hosts_service.list(search=module.params['pattern'])\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n ovirt_hosts=[\n get_dict_of_struct(\n struct=c,\n connection=connection,\n fetch_nested=module.params.get('fetch_nested'),\n attributes=module.params.get('nested_attributes'),\n ) for c in hosts\n ],\n ),\n )\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py"}]}
2,017
352
gh_patches_debug_11327
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-9504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for backup in timestream backup ### Describe the feature This will use AWS backup service to take time-stream backup. ### Extra information or context _No response_ </issue> <code> [start of c7n/resources/timestream.py] 1 from c7n.manager import resources 2 from c7n.actions import Action 3 from c7n.filters.kms import KmsRelatedFilter 4 from c7n.query import DescribeSource, QueryResourceManager, TypeInfo 5 from c7n.utils import local_session, type_schema 6 from c7n.tags import ( 7 TagDelayedAction, 8 TagActionFilter, 9 Tag as TagAction, 10 RemoveTag as RemoveTagAction 11 ) 12 13 14 class DescribeTimestream(DescribeSource): 15 def augment(self, resources): 16 for r in resources: 17 client = local_session(self.manager.session_factory).client('timestream-write') 18 r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags'] 19 return resources 20 21 22 @resources.register('timestream-database') 23 class TimestreamDatabase(QueryResourceManager): 24 class resource_type(TypeInfo): 25 service = 'timestream-write' 26 arn_type = '' 27 name = 'DatabaseName' 28 id = arn = 'Arn' 29 enum_spec = ('list_databases', 'Databases', {}) 30 permission_prefix = 'timestream' 31 permissions = ('timestream:ListDatabases', ) 32 permissions_augment = ("timestream:ListTagsForResource",) 33 source_mapping = { 34 'describe': DescribeTimestream, 35 } 36 37 38 @resources.register('timestream-table') 39 class TimestreamTable(QueryResourceManager): 40 class resource_type(TypeInfo): 41 service = 'timestream-write' 42 arn_type = '' 43 name = 'TableName' 44 id = arn = 'Arn' 45 enum_spec = ('list_tables', 'Tables', {}) 46 permission_prefix = 'timestream' 47 permissions = ('timestream:ListTables', ) 48 49 source_mapping = { 50 'describe': DescribeTimestream, 51 } 52 53 54 @TimestreamDatabase.action_registry.register('tag') 55 @TimestreamTable.action_registry.register('tag') 56 class TimestreamTag(TagAction): 57 58 permissions = ('timestream:TagResource', ) 59 60 def process_resource_set(self, client, resource_set, tags): 61 for r in resource_set: 62 client.tag_resource(ResourceARN=r['Arn'], Tags=tags) 63 64 65 @TimestreamDatabase.action_registry.register('remove-tag') 66 @TimestreamTable.action_registry.register('remove-tag') 67 class TimestreamRemoveTag(RemoveTagAction): 68 69 permissions = ('timestream:UntagResource', ) 70 71 def process_resource_set(self, client, resource_set, tag_keys): 72 for r in resource_set: 73 client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys) 74 75 76 TimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction) 77 TimestreamTable.action_registry.register('mark-for-op', TagDelayedAction) 78 79 TimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter) 80 TimestreamTable.filter_registry.register('marked-for-op', TagActionFilter) 81 82 83 @TimestreamTable.action_registry.register('delete') 84 class TimestreamTableDelete(Action): 85 """ 86 Deletes a timestream table 87 """ 88 89 schema = type_schema('delete') 90 permissions = ('timestream:DeleteTable', ) 91 92 def process(self, resources): 93 client = local_session(self.manager.session_factory).client('timestream-write') 94 for r in resources: 95 try: 96 client.delete_table( 97 DatabaseName=r['DatabaseName'], 98 TableName=r['TableName'] 99 ) 100 except client.exceptions.ResourceNotFoundException: 101 continue 102 103 104 @TimestreamDatabase.action_registry.register('delete') 105 
class TimestreamDatabaseDelete(Action): 106 """ 107 Deletes a timestream database 108 """ 109 110 schema = type_schema('delete', force={'type': 'boolean', 'default': False}) 111 permissions = ( 112 'timestream:DeleteDatabase', 113 'timestream:ListTables', 'timestream:DeleteTable', ) 114 115 def process(self, resources): 116 client = local_session(self.manager.session_factory).client('timestream-write') 117 for r in resources: 118 try: 119 client.delete_database( 120 DatabaseName=r['DatabaseName'], 121 ) 122 except client.exceptions.ResourceNotFoundException: 123 continue 124 except client.exceptions.ValidationException: 125 if not self.data.get('force', False): 126 self.log.error( 127 f'Unable to delete database:{r["DatabaseName"]}, ' 128 'tables must be deleted first') 129 continue 130 tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables'] 131 TimestreamTableDelete( 132 data={'type': 'delete'}, 133 manager=self.manager, 134 log_dir=self.log_dir 135 ).process(tables) 136 client.delete_database( 137 DatabaseName=r['DatabaseName'], 138 ) 139 140 141 @TimestreamDatabase.filter_registry.register('kms-key') 142 class KmsFilter(KmsRelatedFilter): 143 RelatedIdsExpression = 'KmsKeyId' 144 [end of c7n/resources/timestream.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py --- a/c7n/resources/timestream.py +++ b/c7n/resources/timestream.py @@ -9,6 +9,7 @@ Tag as TagAction, RemoveTag as RemoveTagAction ) +from c7n.filters.backup import ConsecutiveAwsBackupsFilter class DescribeTimestream(DescribeSource): @@ -138,6 +139,9 @@ ) +TimestreamTable.filter_registry.register('consecutive-aws-backups', ConsecutiveAwsBackupsFilter) + + @TimestreamDatabase.filter_registry.register('kms-key') class KmsFilter(KmsRelatedFilter): RelatedIdsExpression = 'KmsKeyId'
{"golden_diff": "diff --git a/c7n/resources/timestream.py b/c7n/resources/timestream.py\n--- a/c7n/resources/timestream.py\n+++ b/c7n/resources/timestream.py\n@@ -9,6 +9,7 @@\n Tag as TagAction,\n RemoveTag as RemoveTagAction\n )\n+from c7n.filters.backup import ConsecutiveAwsBackupsFilter\n \n \n class DescribeTimestream(DescribeSource):\n@@ -138,6 +139,9 @@\n )\n \n \n+TimestreamTable.filter_registry.register('consecutive-aws-backups', ConsecutiveAwsBackupsFilter)\n+\n+\n @TimestreamDatabase.filter_registry.register('kms-key')\n class KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n", "issue": "Add support for backup in timestream backup\n### Describe the feature\n\nThis will use AWS backup service to take time-stream backup.\n\n### Extra information or context\n\n_No response_\n", "before_files": [{"content": "from c7n.manager import resources\nfrom c7n.actions import Action\nfrom c7n.filters.kms import KmsRelatedFilter\nfrom c7n.query import DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import (\n TagDelayedAction,\n TagActionFilter,\n Tag as TagAction,\n RemoveTag as RemoveTagAction\n)\n\n\nclass DescribeTimestream(DescribeSource):\n def augment(self, resources):\n for r in resources:\n client = local_session(self.manager.session_factory).client('timestream-write')\n r['Tags'] = client.list_tags_for_resource(ResourceARN=r['Arn'])['Tags']\n return resources\n\n\[email protected]('timestream-database')\nclass TimestreamDatabase(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'DatabaseName'\n id = arn = 'Arn'\n enum_spec = ('list_databases', 'Databases', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListDatabases', )\n permissions_augment = (\"timestream:ListTagsForResource\",)\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]('timestream-table')\nclass TimestreamTable(QueryResourceManager):\n class resource_type(TypeInfo):\n service = 'timestream-write'\n arn_type = ''\n name = 'TableName'\n id = arn = 'Arn'\n enum_spec = ('list_tables', 'Tables', {})\n permission_prefix = 'timestream'\n permissions = ('timestream:ListTables', )\n\n source_mapping = {\n 'describe': DescribeTimestream,\n }\n\n\[email protected]_registry.register('tag')\[email protected]_registry.register('tag')\nclass TimestreamTag(TagAction):\n\n permissions = ('timestream:TagResource', )\n\n def process_resource_set(self, client, resource_set, tags):\n for r in resource_set:\n client.tag_resource(ResourceARN=r['Arn'], Tags=tags)\n\n\[email protected]_registry.register('remove-tag')\[email protected]_registry.register('remove-tag')\nclass TimestreamRemoveTag(RemoveTagAction):\n\n permissions = ('timestream:UntagResource', )\n\n def process_resource_set(self, client, resource_set, tag_keys):\n for r in resource_set:\n client.untag_resource(ResourceARN=r['Arn'], TagKeys=tag_keys)\n\n\nTimestreamDatabase.action_registry.register('mark-for-op', TagDelayedAction)\nTimestreamTable.action_registry.register('mark-for-op', TagDelayedAction)\n\nTimestreamDatabase.filter_registry.register('marked-for-op', TagActionFilter)\nTimestreamTable.filter_registry.register('marked-for-op', TagActionFilter)\n\n\[email protected]_registry.register('delete')\nclass TimestreamTableDelete(Action):\n \"\"\"\n Deletes a timestream table\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('timestream:DeleteTable', )\n\n def process(self, 
resources):\n client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_table(\n DatabaseName=r['DatabaseName'],\n TableName=r['TableName']\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n\n\[email protected]_registry.register('delete')\nclass TimestreamDatabaseDelete(Action):\n \"\"\"\n Deletes a timestream database\n \"\"\"\n\n schema = type_schema('delete', force={'type': 'boolean', 'default': False})\n permissions = (\n 'timestream:DeleteDatabase',\n 'timestream:ListTables', 'timestream:DeleteTable', )\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('timestream-write')\n for r in resources:\n try:\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n except client.exceptions.ResourceNotFoundException:\n continue\n except client.exceptions.ValidationException:\n if not self.data.get('force', False):\n self.log.error(\n f'Unable to delete database:{r[\"DatabaseName\"]}, '\n 'tables must be deleted first')\n continue\n tables = client.list_tables(DatabaseName=r['DatabaseName'])['Tables']\n TimestreamTableDelete(\n data={'type': 'delete'},\n manager=self.manager,\n log_dir=self.log_dir\n ).process(tables)\n client.delete_database(\n DatabaseName=r['DatabaseName'],\n )\n\n\[email protected]_registry.register('kms-key')\nclass KmsFilter(KmsRelatedFilter):\n RelatedIdsExpression = 'KmsKeyId'\n", "path": "c7n/resources/timestream.py"}]}
1,936
169
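The golden diff above adds AWS Backup support by registering `ConsecutiveAwsBackupsFilter` on the Timestream table's filter registry. For context, the following is a minimal, self-contained sketch of that registry pattern; it is not the real c7n `Registry` class, and the decorator support and placeholder filter body are illustrative assumptions.

```python
# Minimal stand-in for the plugin-registry pattern used in the diff above.
# The real c7n Registry is richer; names and bodies here are illustrative.

class FilterRegistry:
    def __init__(self):
        self._filters = {}

    def register(self, name, klass=None):
        # Support both direct calls and decorator usage, as c7n does.
        if klass is None:
            def decorator(cls):
                self._filters[name] = cls
                return cls
            return decorator
        self._filters[name] = klass
        return klass

    def get(self, name):
        return self._filters[name]


class ConsecutiveAwsBackupsFilter:
    """Placeholder for c7n.filters.backup.ConsecutiveAwsBackupsFilter."""


filter_registry = FilterRegistry()

# Direct registration, mirroring the line the golden diff appends:
filter_registry.register('consecutive-aws-backups', ConsecutiveAwsBackupsFilter)

assert filter_registry.get('consecutive-aws-backups') is ConsecutiveAwsBackupsFilter
```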
gh_patches_debug_23069
rasdani/github-patches
git_diff
bokeh__bokeh-6911
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bokeh DateRangeSlider returns tuple of integers instead of dates In 0.12.7 the DateRangeSlider `.value` returns a tuple of integers. Additionally, in order to convert them back to dates you have to divide by 1000 (i.e. `datetime.fromtimestamp(the_tuple[0]/1000)`). Expected behavior: Return a tuple of Date objects. ``` from datetime import datetime from bokeh.models.widgets import DateRangeSlider from bokeh.io import curdoc def date_range_update(attrname, old, new): print('-- range values:', date_slider.value) # Works d1 = datetime.fromtimestamp(date_slider.value[0] / 1000) # Does not Work, gives error d2 = datetime.fromtimestamp(date_slider.value[0]) date_slider = DateRangeSlider(value=(date_start,date_end), start=date_start, end=date_end) date_slider.on_change('value', date_range_update) curdoc().add_root(date_slider) ``` #### Stack traceback and/or browser JavaScript console output #### Screenshots or screencasts of the bug in action </issue> <code> [start of bokeh/models/widgets/sliders.py] 1 """ Various kinds of slider widgets. 2 3 """ 4 from __future__ import absolute_import 5 6 from ...core.has_props import abstract 7 from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override 8 from ...core.enums import SliderCallbackPolicy 9 from ..callbacks import Callback 10 from .widget import Widget 11 12 @abstract 13 class AbstractSlider(Widget): 14 """ """ 15 16 title = String(default="", help=""" 17 Slider's label. 18 """) 19 20 show_value = Bool(default=True, help=""" 21 Whether or not show slider's value. 22 """) 23 24 format = String(help=""" 25 """) 26 27 orientation = Enum("horizontal", "vertical", help=""" 28 Orient the slider either horizontally (default) or vertically. 29 """) 30 31 direction = Enum("ltr", "rtl", help=""" 32 """) 33 34 tooltips = Bool(default=True, help=""" 35 """) 36 37 callback = Instance(Callback, help=""" 38 A callback to run in the browser whenever the current Slider value changes. 39 """) 40 41 callback_throttle = Float(default=200, help=""" 42 Number of millseconds to pause between callback calls as the slider is moved. 43 """) 44 45 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help=""" 46 When the callback is initiated. This parameter can take on only one of three options: 47 48 * "continuous": the callback will be executed immediately for each movement of the slider 49 * "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds. 50 * "mouseup": the callback will be executed only once when the slider is released. 51 52 The "mouseup" policy is intended for scenarios in which the callback is expensive in time. 53 """) 54 55 bar_color = Color(default="#e6e6e6", help=""" 56 """) 57 58 class Slider(AbstractSlider): 59 """ Slider-based number selection widget. """ 60 61 start = Float(help=""" 62 The minimum allowable value. 63 """) 64 65 end = Float(help=""" 66 The maximum allowable value. 67 """) 68 69 value = Float(help=""" 70 Initial or selected value. 71 """) 72 73 step = Float(default=1, help=""" 74 The step between consecutive values. 75 """) 76 77 format = Override(default="0[.]00") 78 79 class RangeSlider(AbstractSlider): 80 """ Range-slider based number range selection widget. """ 81 82 value = Tuple(Float, Float, help=""" 83 Initial or selected range. 84 """) 85 86 start = Float(help=""" 87 The minimum allowable value. 
88 """) 89 90 end = Float(help=""" 91 The maximum allowable value. 92 """) 93 94 step = Float(default=1, help=""" 95 The step between consecutive values. 96 """) 97 98 format = Override(default="0[.]00") 99 100 class DateSlider(AbstractSlider): 101 """ Slider-based date selection widget. """ 102 103 value = Date(help=""" 104 Initial or selected value. 105 """) 106 107 start = Date(help=""" 108 The minimum allowable value. 109 """) 110 111 end = Date(help=""" 112 The maximum allowable value. 113 """) 114 115 step = Int(default=1, help=""" 116 The step between consecutive values. 117 """) 118 119 format = Override(default="%d %b %G") 120 121 class DateRangeSlider(AbstractSlider): 122 """ Slider-based date range selection widget. """ 123 124 value = Tuple(Date, Date, help=""" 125 Initial or selected range. 126 """) 127 128 start = Date(help=""" 129 The minimum allowable value. 130 """) 131 132 end = Date(help=""" 133 The maximum allowable value. 134 """) 135 136 step = Int(default=1, help=""" 137 The step between consecutive values. 138 """) 139 140 format = Override(default="%d %b %G") 141 [end of bokeh/models/widgets/sliders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py --- a/bokeh/models/widgets/sliders.py +++ b/bokeh/models/widgets/sliders.py @@ -3,6 +3,9 @@ """ from __future__ import absolute_import +from datetime import datetime +import numbers + from ...core.has_props import abstract from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override from ...core.enums import SliderCallbackPolicy @@ -121,6 +124,25 @@ class DateRangeSlider(AbstractSlider): """ Slider-based date range selection widget. """ + @property + def value_as_datetime(self): + ''' Convenience property to retrieve the value tuple as a tuple of + datetime objects. + + ''' + if self.value is None: + return None + v1, v2 = self.value + if isinstance(v1, numbers.Number): + d1 = datetime.utcfromtimestamp(v1 / 1000) + else: + d1 = v1 + if isinstance(v2, numbers.Number): + d2 = datetime.utcfromtimestamp(v2 / 1000) + else: + d2 = v2 + return d1, d2 + value = Tuple(Date, Date, help=""" Initial or selected range. """)
{"golden_diff": "diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py\n--- a/bokeh/models/widgets/sliders.py\n+++ b/bokeh/models/widgets/sliders.py\n@@ -3,6 +3,9 @@\n \"\"\"\n from __future__ import absolute_import\n \n+from datetime import datetime\n+import numbers\n+\n from ...core.has_props import abstract\n from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\n from ...core.enums import SliderCallbackPolicy\n@@ -121,6 +124,25 @@\n class DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n \n+ @property\n+ def value_as_datetime(self):\n+ ''' Convenience property to retrieve the value tuple as a tuple of\n+ datetime objects.\n+\n+ '''\n+ if self.value is None:\n+ return None\n+ v1, v2 = self.value\n+ if isinstance(v1, numbers.Number):\n+ d1 = datetime.utcfromtimestamp(v1 / 1000)\n+ else:\n+ d1 = v1\n+ if isinstance(v2, numbers.Number):\n+ d2 = datetime.utcfromtimestamp(v2 / 1000)\n+ else:\n+ d2 = v2\n+ return d1, d2\n+\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n", "issue": "Bokeh DateRangeSlider returns tuple of integers instead of dates\nIn 0.12.7 the DateRangeSlider `.value` returns a tuple of integers. Additionally, in order to convert them back to dates you have to divide by 1000 (i.e. `datetime.fromtimestamp(the_tuple[0]/1000)`).\r\n\r\nExpected behavior: Return a tuple of Date objects.\r\n\r\n```\r\nfrom datetime import datetime\r\nfrom bokeh.models.widgets import DateRangeSlider\r\nfrom bokeh.io import curdoc\r\n\r\ndef date_range_update(attrname, old, new):\r\n print('-- range values:', date_slider.value)\r\n # Works\r\n d1 = datetime.fromtimestamp(date_slider.value[0] / 1000) \r\n # Does not Work, gives error\r\n d2 = datetime.fromtimestamp(date_slider.value[0])\r\n\r\ndate_slider = DateRangeSlider(value=(date_start,date_end), start=date_start, end=date_end)\r\ndate_slider.on_change('value', date_range_update)\r\n\r\ncurdoc().add_root(date_slider)\r\n\r\n```\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\n", "before_files": [{"content": "\"\"\" Various kinds of slider widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\nfrom ...core.enums import SliderCallbackPolicy\nfrom ..callbacks import Callback\nfrom .widget import Widget\n\n@abstract\nclass AbstractSlider(Widget):\n \"\"\" \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Slider's label.\n \"\"\")\n\n show_value = Bool(default=True, help=\"\"\"\n Whether or not show slider's value.\n \"\"\")\n\n format = String(help=\"\"\"\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n direction = Enum(\"ltr\", \"rtl\", help=\"\"\"\n \"\"\")\n\n tooltips = Bool(default=True, help=\"\"\"\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of millseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. 
This parameter can take on only one of three options:\n\n * \"continuous\": the callback will be executed immediately for each movement of the slider\n * \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n * \"mouseup\": the callback will be executed only once when the slider is released.\n\n The \"mouseup\" policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\n bar_color = Color(default=\"#e6e6e6\", help=\"\"\"\n \"\"\")\n\nclass Slider(AbstractSlider):\n \"\"\" Slider-based number selection widget. \"\"\"\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n value = Float(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0[.]00\")\n\nclass RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. \"\"\"\n\n value = Tuple(Float, Float, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0[.]00\")\n\nclass DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n\n value = Date(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n\nclass DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n", "path": "bokeh/models/widgets/sliders.py"}]}
1,918
321
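The fix for this record reduces to one conversion rule: the widget reports epoch milliseconds, so divide by 1000 before calling `utcfromtimestamp`. Below is a standalone sketch of the `value_as_datetime` logic the diff adds; the free-function form and the sample values are illustrative rather than Bokeh API.

```python
# Standalone version of the conversion added as DateRangeSlider.value_as_datetime.
from datetime import datetime
import numbers


def value_as_datetime(value):
    """Return the slider value as a (datetime, datetime) tuple."""
    if value is None:
        return None

    def convert(v):
        # Numbers arrive as milliseconds since the Unix epoch; date and
        # datetime objects pass through untouched.
        return datetime.utcfromtimestamp(v / 1000) if isinstance(v, numbers.Number) else v

    v1, v2 = value
    return convert(v1), convert(v2)


# (1970-01-01 00:00:00, 1970-01-02 00:00:00)
print(value_as_datetime((0, 86_400_000)))
```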
gh_patches_debug_30011
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1889
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Teams Task Module - Deserialization Error on Teams mobile app for iOS ## Version botbuilder-integration-aiohttp 4.14.0 Python 3.8.6 ## Describe the bug Error when loading Task Module on iOS iOS 14.8.1 / MS Teams v3.20.0 ## To Reproduce 1. Deploy [sample bot 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module) 2. Say hello and click on _Adaptive Card_ button 3. Deserialization Error when on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 ![image](https://user-images.githubusercontent.com/4013036/146412591-61399a75-d3d3-4eb6-a0ec-36ffa3cac54c.png) ## Traceback _(file locations prefix intentionally removed)_ ``` File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) AttributeError: 'str' object has no attribute 'get' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "test_teams_task/env/lib/site-packages/botbuilder/core/bot_adapter.py", line 129, in run_pipeline context, callback File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "test_teams_task/env/lib/site-packages/botbuilder/core/activity_handler.py", line 78, in on_turn invoke_response = await self.on_invoke_activity(turn_context) File "test_teams_task/env/lib/site-packages/botbuilder/core/teams/teams_activity_handler.py", line 155, in on_invoke_activity TaskModuleRequest, turn_context.activity.value File "test_teams_task/env/lib/site-packages/botbuilder/core/serializer_helper.py", line 28, in deserializer_helper return deserializer(msrest_cls.__name__, dict_to_deserialize) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1233, in __call__ return self._deserialize(target_obj, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1299, in _deserialize value = self.deserialize_data(raw_value, attr_desc['type']) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1468, in deserialize_data return self._deserialize(obj_type, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1303, in _deserialize raise_with_traceback(DeserializationError, msg, err) File "test_teams_task/env/lib/site-packages/msrest/exceptions.py", line 51, in raise_with_traceback raise error.with_traceback(exc_traceback) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) msrest.exceptions.DeserializationError: Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get' ``` ## Expected behavior This sample bot raises no error when interacting on the following platforms: - Windows 10 (Desktop app, Firefox, Chrome) - macOS (Chrome) - Android (Mobile app) 
![image](https://user-images.githubusercontent.com/4013036/146413680-7bc42c4d-9876-4d18-9a61-7b94b4a5cccb.png) It was possible to interact with Task Module on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 when deploying these samples (python not included): https://docs.microsoft.com/en-us/samples/officedev/microsoft-teams-samples/ms-teams-task-sample/ ## Additional context Initially the error was detected on a bot in production currently deployed in Azure. Since the error message is the same when running [bot sample 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module), for the sake of repro we can take this example. </issue> <code> [start of libraries/botbuilder-core/botbuilder/core/serializer_helper.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from inspect import getmembers 5 from typing import Type 6 from enum import Enum 7 8 from msrest.serialization import Model, Deserializer, Serializer 9 10 import botbuilder.schema as schema 11 import botbuilder.schema.teams as teams_schema 12 13 DEPENDICIES = [ 14 schema_cls 15 for key, schema_cls in getmembers(schema) 16 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum)) 17 ] 18 DEPENDICIES += [ 19 schema_cls 20 for key, schema_cls in getmembers(teams_schema) 21 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum)) 22 ] 23 DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES} 24 25 26 def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model: 27 deserializer = Deserializer(DEPENDICIES_DICT) 28 return deserializer(msrest_cls.__name__, dict_to_deserialize) 29 30 31 def serializer_helper(object_to_serialize: Model) -> dict: 32 if object_to_serialize is None: 33 return None 34 35 serializer = Serializer(DEPENDICIES_DICT) 36 # pylint: disable=protected-access 37 return serializer._serialize(object_to_serialize) 38 [end of libraries/botbuilder-core/botbuilder/core/serializer_helper.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py --- a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py +++ b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - +from copy import copy from inspect import getmembers from typing import Type from enum import Enum @@ -25,6 +25,9 @@ def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model: deserializer = Deserializer(DEPENDICIES_DICT) + _clean_data_for_serialization( + deserializer.dependencies[msrest_cls.__name__], dict_to_deserialize + ) return deserializer(msrest_cls.__name__, dict_to_deserialize) @@ -35,3 +38,21 @@ serializer = Serializer(DEPENDICIES_DICT) # pylint: disable=protected-access return serializer._serialize(object_to_serialize) + + +def _clean_data_for_serialization(msrest_cls: Type[Model], dict_to_deserialize: dict): + # pylint: disable=protected-access + # Clean channel response of empty strings for expected objects. + if not isinstance(dict_to_deserialize, dict): + return + serialization_model = copy(msrest_cls._attribute_map) + for key, value in msrest_cls._attribute_map.items(): + if key != value["key"]: + serialization_model[value["key"]] = value + for prop, prop_value in dict_to_deserialize.items(): + if ( + prop in serialization_model + and serialization_model[prop]["type"] in DEPENDICIES_DICT + and not prop_value + ): + dict_to_deserialize[prop] = None
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n--- a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n+++ b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py\n@@ -1,6 +1,6 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n-\n+from copy import copy\n from inspect import getmembers\n from typing import Type\n from enum import Enum\n@@ -25,6 +25,9 @@\n \n def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n deserializer = Deserializer(DEPENDICIES_DICT)\n+ _clean_data_for_serialization(\n+ deserializer.dependencies[msrest_cls.__name__], dict_to_deserialize\n+ )\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n \n \n@@ -35,3 +38,21 @@\n serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n+\n+\n+def _clean_data_for_serialization(msrest_cls: Type[Model], dict_to_deserialize: dict):\n+ # pylint: disable=protected-access\n+ # Clean channel response of empty strings for expected objects.\n+ if not isinstance(dict_to_deserialize, dict):\n+ return\n+ serialization_model = copy(msrest_cls._attribute_map)\n+ for key, value in msrest_cls._attribute_map.items():\n+ if key != value[\"key\"]:\n+ serialization_model[value[\"key\"]] = value\n+ for prop, prop_value in dict_to_deserialize.items():\n+ if (\n+ prop in serialization_model\n+ and serialization_model[prop][\"type\"] in DEPENDICIES_DICT\n+ and not prop_value\n+ ):\n+ dict_to_deserialize[prop] = None\n", "issue": "Teams Task Module - Deserialization Error on Teams mobile app for iOS\n## Version\r\nbotbuilder-integration-aiohttp 4.14.0\r\nPython 3.8.6 \r\n\r\n## Describe the bug\r\nError when loading Task Module on iOS iOS 14.8.1 / MS Teams v3.20.0\r\n\r\n## To Reproduce\r\n1. Deploy [sample bot 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module) \r\n2. Say hello and click on _Adaptive Card_ button\r\n3. 
Deserialization Error when on iOS iOS 14.8.1 / Microsoft Teams v3.20.0\r\n![image](https://user-images.githubusercontent.com/4013036/146412591-61399a75-d3d3-4eb6-a0ec-36ffa3cac54c.png)\r\n\r\n## Traceback\r\n_(file locations prefix intentionally removed)_\r\n```\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1293, in _deserialize\r\n found_value = key_extractor(attr, attr_desc, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1064, in rest_key_extractor\r\n return working_data.get(key)\r\nAttributeError: 'str' object has no attribute 'get'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/bot_adapter.py\", line 129, in run_pipeline\r\n context, callback\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py\", line 69, in receive_activity_with_status\r\n return await self.receive_activity_internal(context, callback)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py\", line 79, in receive_activity_internal\r\n return await callback(context)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/activity_handler.py\", line 78, in on_turn\r\n invoke_response = await self.on_invoke_activity(turn_context)\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/teams/teams_activity_handler.py\", line 155, in on_invoke_activity\r\n TaskModuleRequest, turn_context.activity.value\r\n File \"test_teams_task/env/lib/site-packages/botbuilder/core/serializer_helper.py\", line 28, in deserializer_helper\r\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1233, in __call__\r\n return self._deserialize(target_obj, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1299, in _deserialize\r\n value = self.deserialize_data(raw_value, attr_desc['type'])\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1468, in deserialize_data\r\n return self._deserialize(obj_type, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1303, in _deserialize\r\n raise_with_traceback(DeserializationError, msg, err)\r\n File \"test_teams_task/env/lib/site-packages/msrest/exceptions.py\", line 51, in raise_with_traceback\r\n raise error.with_traceback(exc_traceback)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1293, in _deserialize\r\n found_value = key_extractor(attr, attr_desc, data)\r\n File \"test_teams_task/env/lib/site-packages/msrest/serialization.py\", line 1064, in rest_key_extractor\r\n return working_data.get(key)\r\nmsrest.exceptions.DeserializationError: Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get'\r\n```\r\n\r\n## Expected behavior\r\nThis sample bot raises no error when interacting on the following platforms:\r\n- Windows 10 (Desktop app, Firefox, Chrome)\r\n- macOS (Chrome)\r\n- Android (Mobile app)\r\n![image](https://user-images.githubusercontent.com/4013036/146413680-7bc42c4d-9876-4d18-9a61-7b94b4a5cccb.png)\r\n\r\nIt was possible to interact with Task Module on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 when deploying these samples (python not included):\r\nhttps://docs.microsoft.com/en-us/samples/officedev/microsoft-teams-samples/ms-teams-task-sample/\r\n\r\n## 
Additional context\r\nInitially the error was detected on a bot in production currently deployed in Azure. Since the error message is the same when running [bot sample 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module), for the sake of repro we can take this example. \r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom inspect import getmembers\nfrom typing import Type\nfrom enum import Enum\n\nfrom msrest.serialization import Model, Deserializer, Serializer\n\nimport botbuilder.schema as schema\nimport botbuilder.schema.teams as teams_schema\n\nDEPENDICIES = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}\n\n\ndef deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n deserializer = Deserializer(DEPENDICIES_DICT)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n\n\ndef serializer_helper(object_to_serialize: Model) -> dict:\n if object_to_serialize is None:\n return None\n\n serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "path": "libraries/botbuilder-core/botbuilder/core/serializer_helper.py"}]}
2,007
429
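The essence of this patch is payload sanitation before msrest deserialization: when a property is declared as a nested model type but the channel sent an empty string (as the iOS Teams client does), replace it with `None`. The sketch below reproduces that rule without msrest; the attribute map, the model-type set, and the field names are made-up stand-ins for the real `_attribute_map` and `DEPENDICIES_DICT` structures.

```python
# Simplified model of the cleanup step the golden diff runs before deserializing.

MODEL_TYPES = {"TaskModuleRequestContext"}  # stand-in for DEPENDICIES_DICT keys

ATTRIBUTE_MAP = {
    "data": {"key": "data", "type": "object"},
    "context": {"key": "context", "type": "TaskModuleRequestContext"},
}


def clean_data_for_serialization(dict_to_deserialize):
    if not isinstance(dict_to_deserialize, dict):
        return
    for prop, prop_value in dict_to_deserialize.items():
        spec = ATTRIBUTE_MAP.get(prop)
        # An empty string where a nested object is expected is what breaks
        # msrest's rest_key_extractor ('str' object has no attribute 'get').
        if spec and spec["type"] in MODEL_TYPES and not prop_value:
            dict_to_deserialize[prop] = None


payload = {"data": "hello", "context": ""}
clean_data_for_serialization(payload)
assert payload == {"data": "hello", "context": None}
```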
gh_patches_debug_4889
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4990
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove $ before shell commands in docs The developers have a [preference](https://github.com/rtfd/readthedocs.org/pull/4676#discussion_r221400605) to not have a `$` before shell commands in the docs. This makes it easier to copy and paste from our docs. We should remove it everywhere. The following command should show it everywhere. grep -Ri " $ " docs/*.rst docs/*/*.rst </issue> <code> [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 3 from __future__ import division, print_function, unicode_literals 4 5 import os 6 import sys 7 8 import sphinx_rtd_theme 9 from recommonmark.parser import CommonMarkParser 10 11 sys.path.insert(0, os.path.abspath('..')) 12 sys.path.append(os.path.dirname(__file__)) 13 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev") 14 15 from django.conf import settings 16 from django.utils import timezone 17 18 import django 19 django.setup() 20 21 22 sys.path.append(os.path.abspath('_ext')) 23 extensions = [ 24 'sphinx.ext.autosectionlabel', 25 'sphinx.ext.autodoc', 26 'sphinx.ext.intersphinx', 27 'sphinxcontrib.httpdomain', 28 'djangodocs', 29 'doc_extensions', 30 'sphinx_tabs.tabs', 31 ] 32 templates_path = ['_templates'] 33 34 source_suffix = ['.rst', '.md'] 35 source_parsers = { 36 '.md': CommonMarkParser, 37 } 38 39 master_doc = 'index' 40 project = u'Read the Docs' 41 copyright = '2010-{}, Read the Docs, Inc & contributors'.format( 42 timezone.now().year 43 ) 44 version = '2.7' 45 release = version 46 exclude_patterns = ['_build'] 47 default_role = 'obj' 48 intersphinx_mapping = { 49 'python': ('http://python.readthedocs.io/en/latest/', None), 50 'django': ('http://django.readthedocs.io/en/1.9.x/', None), 51 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None), 52 } 53 htmlhelp_basename = 'ReadTheDocsdoc' 54 latex_documents = [ 55 ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation', 56 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'), 57 ] 58 man_pages = [ 59 ('index', 'read-the-docs', u'Read the Docs Documentation', 60 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1) 61 ] 62 63 exclude_patterns = [ 64 # 'api' # needed for ``make gettext`` to not die. 65 ] 66 67 language = 'en' 68 69 locale_dirs = [ 70 'locale/', 71 ] 72 gettext_compact = False 73 74 html_theme = 'sphinx_rtd_theme' 75 html_static_path = ['_static'] 76 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 77 html_logo = 'img/logo.svg' 78 html_theme_options = { 79 'logo_only': True, 80 'display_version': False, 81 } 82 83 # Activate autosectionlabel plugin 84 autosectionlabel_prefix_document = True 85 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -28,6 +28,7 @@ 'djangodocs', 'doc_extensions', 'sphinx_tabs.tabs', + 'sphinx-prompt', ] templates_path = ['_templates'] @@ -82,3 +83,7 @@ # Activate autosectionlabel plugin autosectionlabel_prefix_document = True + + +def setup(app): + app.add_stylesheet('css/sphinx_prompt_css.css')
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -28,6 +28,7 @@\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n+ 'sphinx-prompt',\n ]\n templates_path = ['_templates']\n \n@@ -82,3 +83,7 @@\n \n # Activate autosectionlabel plugin\n autosectionlabel_prefix_document = True\n+\n+\n+def setup(app):\n+ app.add_stylesheet('css/sphinx_prompt_css.css')\n", "issue": "Remove $ before shell commands in docs\nThe developers have a [preference](https://github.com/rtfd/readthedocs.org/pull/4676#discussion_r221400605) to not have a `$` before shell commands in the docs. This makes it easier to copy and paste from our docs. We should remove it everywhere. The following command should show it everywhere.\r\n\r\n grep -Ri \" $ \" docs/*.rst docs/*/*.rst\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = '2.7'\nrelease = version\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.9.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read the Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n", "path": "docs/conf.py"}]}
1,345
123
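The accepted fix swaps literal `$ ` prefixes for the `sphinx-prompt` extension, which renders an unselectable prompt so copy-paste grabs only the command. A minimal `conf.py` sketch of the two changes in the diff follows; the `autodoc` entry is just list context, and `add_stylesheet` matches the Sphinx API of that era (newer Sphinx spells it `add_css_file`).

```python
# conf.py sketch: enable sphinx-prompt and ship a small stylesheet for it.

extensions = [
    "sphinx.ext.autodoc",   # context only; any existing extensions stay
    "sphinx-prompt",        # provides the `.. prompt:: bash $` directive
]


def setup(app):
    # Sphinx invokes this hook at build time; the CSS tweaks prompt styling.
    app.add_stylesheet("css/sphinx_prompt_css.css")
```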
gh_patches_debug_19225
rasdani/github-patches
git_diff
docker__docker-py-971
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tlsv1 alert protocol version on 1.7.1 and 1.7.2 but not on 1.7.0 Similar to #949 I'm discovering issues with latest versions of `docker-py` running against docker 1.10.2 instance. I'm using `docker.utils.kwargs_from_env(assert_hostname=False)`. Things work fine with version 1.7.0. Docker client is initialized via ``` client = docker.Client( version='auto', **docker.utils.kwargs_from_env(assert_hostname=False)) ``` with docker environment variables being set to the following (via docker-machine) ``` DOCKER_HOST=tcp://192.168.156.137:2376 DOCKER_MACHINE_NAME=dev2 DOCKER_TLS_VERIFY=1 DOCKER_CERT_PATH=/Users/benjixx/.docker/machine/machines/dev2 ``` docker-py 1.7.1 and 1.7.2 now raise the following exception: ``` DockerException: Error while fetching server API version: [Errno 1] _ssl.c:507: error:1407742E:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert protocol version ``` Any idea what's happening here? </issue> <code> [start of docker/tls.py] 1 import os 2 3 from . import errors 4 from .ssladapter import ssladapter 5 6 7 class TLSConfig(object): 8 cert = None 9 ca_cert = None 10 verify = None 11 ssl_version = None 12 13 def __init__(self, client_cert=None, ca_cert=None, verify=None, 14 ssl_version=None, assert_hostname=None, 15 assert_fingerprint=None): 16 # Argument compatibility/mapping with 17 # https://docs.docker.com/engine/articles/https/ 18 # This diverges from the Docker CLI in that users can specify 'tls' 19 # here, but also disable any public/default CA pool verification by 20 # leaving tls_verify=False 21 22 self.ssl_version = ssl_version 23 self.assert_hostname = assert_hostname 24 self.assert_fingerprint = assert_fingerprint 25 26 # "tls" and "tls_verify" must have both or neither cert/key files 27 # In either case, Alert the user when both are expected, but any are 28 # missing. 29 30 if client_cert: 31 try: 32 tls_cert, tls_key = client_cert 33 except ValueError: 34 raise errors.TLSParameterError( 35 'client_config must be a tuple of' 36 ' (client certificate, key file)' 37 ) 38 39 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or 40 not os.path.isfile(tls_key)): 41 raise errors.TLSParameterError( 42 'Path to a certificate and key files must be provided' 43 ' through the client_config param' 44 ) 45 self.cert = (tls_cert, tls_key) 46 47 # If verify is set, make sure the cert exists 48 self.verify = verify 49 self.ca_cert = ca_cert 50 if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert): 51 raise errors.TLSParameterError( 52 'Invalid CA certificate provided for `tls_ca_cert`.' 53 ) 54 55 def configure_client(self, client): 56 client.ssl_version = self.ssl_version 57 58 if self.verify and self.ca_cert: 59 client.verify = self.ca_cert 60 else: 61 client.verify = self.verify 62 63 if self.cert: 64 client.cert = self.cert 65 66 client.mount('https://', ssladapter.SSLAdapter( 67 ssl_version=self.ssl_version, 68 assert_hostname=self.assert_hostname, 69 assert_fingerprint=self.assert_fingerprint, 70 )) 71 [end of docker/tls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/tls.py b/docker/tls.py --- a/docker/tls.py +++ b/docker/tls.py @@ -1,4 +1,5 @@ import os +import ssl from . import errors from .ssladapter import ssladapter @@ -19,10 +20,14 @@ # here, but also disable any public/default CA pool verification by # leaving tls_verify=False - self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint + # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious + # ways: https://github.com/docker/docker-py/issues/963 + + self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1 + # "tls" and "tls_verify" must have both or neither cert/key files # In either case, Alert the user when both are expected, but any are # missing.
{"golden_diff": "diff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -1,4 +1,5 @@\n import os\n+import ssl\n \n from . import errors\n from .ssladapter import ssladapter\n@@ -19,10 +20,14 @@\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n \n- self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n \n+ # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious\n+ # ways: https://github.com/docker/docker-py/issues/963\n+\n+ self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1\n+\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n", "issue": "tlsv1 alert protocol version on 1.7.1 and 1.7.2 but not on 1.7.0\nSimilar to #949 I'm discovering issues with latest versions of `docker-py` running against docker 1.10.2 instance. I'm using `docker.utils.kwargs_from_env(assert_hostname=False)`. Things work fine with version 1.7.0.\n\nDocker client is initialized via\n\n```\nclient = docker.Client(\n version='auto',\n **docker.utils.kwargs_from_env(assert_hostname=False))\n```\n\nwith docker environment variables being set to the following (via docker-machine)\n\n```\nDOCKER_HOST=tcp://192.168.156.137:2376\nDOCKER_MACHINE_NAME=dev2\nDOCKER_TLS_VERIFY=1\nDOCKER_CERT_PATH=/Users/benjixx/.docker/machine/machines/dev2\n```\n\ndocker-py 1.7.1 and 1.7.2 now raise the following exception:\n\n```\nDockerException: Error while fetching server API version: [Errno 1] _ssl.c:507: error:1407742E:SSL routines:SSL23_GET_SERVER_HELLO:tlsv1 alert protocol version\n```\n\nAny idea what's happening here?\n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n ca_cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None,\n assert_fingerprint=None):\n # Argument compatibility/mapping with\n # https://docs.docker.com/engine/articles/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n self.assert_fingerprint = assert_fingerprint\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # If verify is set, make sure the cert exists\n self.verify = verify\n self.ca_cert = ca_cert\n if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n\n if self.verify and self.ca_cert:\n client.verify = self.ca_cert\n else:\n client.verify = self.verify\n\n if self.cert:\n client.cert = self.cert\n\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n assert_fingerprint=self.assert_fingerprint,\n ))\n", "path": "docker/tls.py"}]}
1,450
224
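The one-line essence of this patch: fall back to an explicit TLS protocol instead of letting SSLv23 negotiation pick one, which is what produced the `tlsv1 alert protocol version` handshake failure. The class below is a stripped-down stand-in for `TLSConfig`, kept runnable on its own; pinning `PROTOCOL_TLSv1` matches this 2016-era fix, while modern code would prefer `PROTOCOL_TLS_CLIENT`.

```python
import ssl


class TLSConfigSketch:
    """Stand-in for docker.tls.TLSConfig, reduced to the patched behaviour."""

    def __init__(self, ssl_version=None):
        # Deterministic default instead of implicit SSLv23 negotiation.
        self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1


print(TLSConfigSketch().ssl_version)                      # PROTOCOL_TLSv1
print(TLSConfigSketch(ssl.PROTOCOL_TLSv1_2).ssl_version)  # caller's choice wins
```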
gh_patches_debug_12926
rasdani/github-patches
git_diff
bokeh__bokeh-6804
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Automatic configuration of Slider.format Integer sliders should use integer formatting. </issue> <code> [start of bokeh/models/widgets/sliders.py] 1 """ Various kinds of slider widgets. 2 3 """ 4 from __future__ import absolute_import 5 6 from ...core.has_props import abstract 7 from ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override 8 from ...core.enums import SliderCallbackPolicy 9 from ..callbacks import Callback 10 from .widget import Widget 11 12 @abstract 13 class AbstractSlider(Widget): 14 """ """ 15 16 title = String(default="", help=""" 17 Slider's label. 18 """) 19 20 show_value = Bool(default=True, help=""" 21 Whether or not show slider's value. 22 """) 23 24 format = String(help=""" 25 """) 26 27 orientation = Enum("horizontal", "vertical", help=""" 28 Orient the slider either horizontally (default) or vertically. 29 """) 30 31 direction = Enum("ltr", "rtl", help=""" 32 """) 33 34 tooltips = Bool(default=True, help=""" 35 """) 36 37 callback = Instance(Callback, help=""" 38 A callback to run in the browser whenever the current Slider value changes. 39 """) 40 41 callback_throttle = Float(default=200, help=""" 42 Number of millseconds to pause between callback calls as the slider is moved. 43 """) 44 45 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help=""" 46 When the callback is initiated. This parameter can take on only one of three options: 47 48 * "continuous": the callback will be executed immediately for each movement of the slider 49 * "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds. 50 * "mouseup": the callback will be executed only once when the slider is released. 51 52 The "mouseup" policy is intended for scenarios in which the callback is expensive in time. 53 """) 54 55 bar_color = Color(default="#3fb8af", help=""" 56 """) 57 58 class Slider(AbstractSlider): 59 """ Slider-based number selection widget. """ 60 61 start = Float(help=""" 62 The minimum allowable value. 63 """) 64 65 end = Float(help=""" 66 The maximum allowable value. 67 """) 68 69 value = Float(help=""" 70 Initial or selected value. 71 """) 72 73 step = Float(default=1, help=""" 74 The step between consecutive values. 75 """) 76 77 format = Override(default="0,0.00") 78 79 class RangeSlider(AbstractSlider): 80 """ Range-slider based number range selection widget. """ 81 82 value = Tuple(Float, Float, help=""" 83 Initial or selected range. 84 """) 85 86 start = Float(help=""" 87 The minimum allowable value. 88 """) 89 90 end = Float(help=""" 91 The maximum allowable value. 92 """) 93 94 step = Float(default=1, help=""" 95 The step between consecutive values. 96 """) 97 98 format = Override(default="0,0.00") 99 100 class DateSlider(AbstractSlider): 101 """ Slider-based date selection widget. """ 102 103 value = Date(help=""" 104 Initial or selected value. 105 """) 106 107 start = Date(help=""" 108 The minimum allowable value. 109 """) 110 111 end = Date(help=""" 112 The maximum allowable value. 113 """) 114 115 step = Int(default=1, help=""" 116 The step between consecutive values. 117 """) 118 119 format = Override(default="%d %b %G") 120 121 class DateRangeSlider(AbstractSlider): 122 """ Slider-based date range selection widget. """ 123 124 value = Tuple(Date, Date, help=""" 125 Initial or selected range. 126 """) 127 128 start = Date(help=""" 129 The minimum allowable value. 
130 """) 131 132 end = Date(help=""" 133 The maximum allowable value. 134 """) 135 136 step = Int(default=1, help=""" 137 The step between consecutive values. 138 """) 139 140 format = Override(default="%d %b %G") 141 [end of bokeh/models/widgets/sliders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py --- a/bokeh/models/widgets/sliders.py +++ b/bokeh/models/widgets/sliders.py @@ -74,7 +74,7 @@ The step between consecutive values. """) - format = Override(default="0,0.00") + format = Override(default="0[.]00") class RangeSlider(AbstractSlider): """ Range-slider based number range selection widget. """ @@ -95,7 +95,7 @@ The step between consecutive values. """) - format = Override(default="0,0.00") + format = Override(default="0[.]00") class DateSlider(AbstractSlider): """ Slider-based date selection widget. """
{"golden_diff": "diff --git a/bokeh/models/widgets/sliders.py b/bokeh/models/widgets/sliders.py\n--- a/bokeh/models/widgets/sliders.py\n+++ b/bokeh/models/widgets/sliders.py\n@@ -74,7 +74,7 @@\n The step between consecutive values.\n \"\"\")\n \n- format = Override(default=\"0,0.00\")\n+ format = Override(default=\"0[.]00\")\n \n class RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. \"\"\"\n@@ -95,7 +95,7 @@\n The step between consecutive values.\n \"\"\")\n \n- format = Override(default=\"0,0.00\")\n+ format = Override(default=\"0[.]00\")\n \n class DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n", "issue": "Automatic configuration of Slider.format\nInteger sliders should use integer formatting.\r\n\n", "before_files": [{"content": "\"\"\" Various kinds of slider widgets.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ...core.has_props import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, Enum, Tuple, Instance, Color, Override\nfrom ...core.enums import SliderCallbackPolicy\nfrom ..callbacks import Callback\nfrom .widget import Widget\n\n@abstract\nclass AbstractSlider(Widget):\n \"\"\" \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Slider's label.\n \"\"\")\n\n show_value = Bool(default=True, help=\"\"\"\n Whether or not show slider's value.\n \"\"\")\n\n format = String(help=\"\"\"\n \"\"\")\n\n orientation = Enum(\"horizontal\", \"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n direction = Enum(\"ltr\", \"rtl\", help=\"\"\"\n \"\"\")\n\n tooltips = Bool(default=True, help=\"\"\"\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of millseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. This parameter can take on only one of three options:\n\n * \"continuous\": the callback will be executed immediately for each movement of the slider\n * \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n * \"mouseup\": the callback will be executed only once when the slider is released.\n\n The \"mouseup\" policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\n bar_color = Color(default=\"#3fb8af\", help=\"\"\"\n \"\"\")\n\nclass Slider(AbstractSlider):\n \"\"\" Slider-based number selection widget. \"\"\"\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n value = Float(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0,0.00\")\n\nclass RangeSlider(AbstractSlider):\n \"\"\" Range-slider based number range selection widget. 
\"\"\"\n\n value = Tuple(Float, Float, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Float(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"0,0.00\")\n\nclass DateSlider(AbstractSlider):\n \"\"\" Slider-based date selection widget. \"\"\"\n\n value = Date(help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n\nclass DateRangeSlider(AbstractSlider):\n \"\"\" Slider-based date range selection widget. \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n Initial or selected range.\n \"\"\")\n\n start = Date(help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Date(help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Int(default=1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n format = Override(default=\"%d %b %G\")\n", "path": "bokeh/models/widgets/sliders.py"}]}
1,690
179
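`0,0.00` and `0[.]00` are numbro/numeral.js format strings evaluated in the browser, where the bracketed part means "only when present", so integer slider values render without a forced `.00` tail. To make that semantics concrete, here is a small Python approximation of the optional-decimals rule (the function name and two-place rounding are illustrative assumptions):

```python
def format_optional_decimals(value, places=2):
    """Approximate numeral.js '0[.]00': drop the fraction for whole numbers."""
    if float(value) == int(value):
        return str(int(value))
    return f"{value:.{places}f}"


print(format_optional_decimals(5))     # "5"    (integer slider stays integer)
print(format_optional_decimals(5.25))  # "5.25"
```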
gh_patches_debug_22902
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> W&B: Allow for passing experiment into the WandbLogger (and logging semantics) Currently, the WandbLogger will automatically create a new internal experiment (run) whenever you create a new WandbLogger. # Issue If I instantiate a wandb experiment outside of the logger, then I will have two experiments when I train my model since there is no way to set the internal experiment of the WandbLogger to my current external experiment. # Potential Solution Allow for passing an experiment into the WandbLogger: ``` class WandbLogger(LightningLoggerBase): def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False, version=None, project=None, tags=None, experiment=None): . . . self._experiment = experiment ``` Then I can do this: ``` experiment = wandb.init(.......) wandb_logger = WandbLogger(experiment=experiment) ``` I made this change locally, however, I wasn't sure if this was something you also wanted to implement as well. It works for me. # Another small note In the `WandbLogger.log_metrics` function, I would change: `self.experiment.history.add(metrics)` --> `self.experiment.log(metrics)` </issue> <code> [start of pytorch_lightning/logging/wandb.py] 1 import os 2 3 try: 4 import wandb 5 except ImportError: 6 raise ImportError('Missing wandb package.') 7 8 from .base import LightningLoggerBase, rank_zero_only 9 10 11 class WandbLogger(LightningLoggerBase): 12 """ 13 Logger for W&B. 14 15 Args: 16 name (str): display name for the run. 17 save_dir (str): path where data is saved. 18 offline (bool): run offline (data can be streamed later to wandb servers). 19 id or version (str): sets the version, mainly used to resume a previous run. 20 anonymous (bool): enables or explicitly disables anonymous logging. 21 project (str): the name of the project to which this run will belong. 22 tags (list of str): tags associated with this run. 23 24 Example 25 -------- 26 .. code-block:: python 27 28 from pytorch_lightning.logging import WandbLogger 29 from pytorch_lightning import Trainer 30 31 wandb_logger = WandbLogger() 32 trainer = Trainer(logger=wandb_logger) 33 """ 34 35 def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False, 36 version=None, project=None, tags=None): 37 super().__init__() 38 self._name = name 39 self._save_dir = save_dir 40 self._anonymous = "allow" if anonymous else None 41 self._id = version or id 42 self._tags = tags 43 self._project = project 44 self._experiment = None 45 self._offline = offline 46 47 def __getstate__(self): 48 state = self.__dict__.copy() 49 # cannot be pickled 50 state['_experiment'] = None 51 # args needed to reload correct experiment 52 state['_id'] = self.experiment.id 53 return state 54 55 @property 56 def experiment(self): 57 r""" 58 59 Actual wandb object. To use wandb features do the following. 
60 61 Example:: 62 63 self.logger.experiment.some_wandb_function() 64 65 """ 66 if self._experiment is None: 67 if self._offline: 68 os.environ["WANDB_MODE"] = "dryrun" 69 self._experiment = wandb.init( 70 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, 71 id=self._id, resume="allow", tags=self._tags) 72 return self._experiment 73 74 def watch(self, model, log="gradients", log_freq=100): 75 wandb.watch(model, log, log_freq) 76 77 @rank_zero_only 78 def log_hyperparams(self, params): 79 self.experiment.config.update(params) 80 81 @rank_zero_only 82 def log_metrics(self, metrics, step=None): 83 metrics["global_step"] = step 84 self.experiment.history.add(metrics) 85 86 def save(self): 87 pass 88 89 @rank_zero_only 90 def finalize(self, status='success'): 91 try: 92 exit_code = 0 if status == 'success' else 1 93 wandb.join(exit_code) 94 except TypeError: 95 wandb.join() 96 97 @property 98 def name(self): 99 return self.experiment.project_name() 100 101 @property 102 def version(self): 103 return self.experiment.id 104 [end of pytorch_lightning/logging/wandb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_lightning/logging/wandb.py b/pytorch_lightning/logging/wandb.py --- a/pytorch_lightning/logging/wandb.py +++ b/pytorch_lightning/logging/wandb.py @@ -33,7 +33,7 @@ """ def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False, - version=None, project=None, tags=None): + version=None, project=None, tags=None, experiment=None): super().__init__() self._name = name self._save_dir = save_dir @@ -41,7 +41,7 @@ self._id = version or id self._tags = tags self._project = project - self._experiment = None + self._experiment = experiment self._offline = offline def __getstate__(self): @@ -81,7 +81,7 @@ @rank_zero_only def log_metrics(self, metrics, step=None): metrics["global_step"] = step - self.experiment.history.add(metrics) + self.experiment.log(metrics) def save(self): pass
{"golden_diff": "diff --git a/pytorch_lightning/logging/wandb.py b/pytorch_lightning/logging/wandb.py\n--- a/pytorch_lightning/logging/wandb.py\n+++ b/pytorch_lightning/logging/wandb.py\n@@ -33,7 +33,7 @@\n \"\"\"\n \n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\n- version=None, project=None, tags=None):\n+ version=None, project=None, tags=None, experiment=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n@@ -41,7 +41,7 @@\n self._id = version or id\n self._tags = tags\n self._project = project\n- self._experiment = None\n+ self._experiment = experiment\n self._offline = offline\n \n def __getstate__(self):\n@@ -81,7 +81,7 @@\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n metrics[\"global_step\"] = step\n- self.experiment.history.add(metrics)\n+ self.experiment.log(metrics)\n \n def save(self):\n pass\n", "issue": "W&B: Allow for passing experiment into the WandbLogger (and logging semantics)\nCurrently, the WandbLogger will automatically create a new internal experiment (run) whenever you create a new WandbLogger.\r\n\r\n# Issue \r\n\r\nIf I instantiate a wandb experiment outside of the logger, then I will have two experiments when I train my model since there is no way to set the internal experiment of the WandbLogger to my current external experiment.\r\n\r\n# Potential Solution\r\nAllow for passing an experiment into the WandbLogger:\r\n\r\n```\r\nclass WandbLogger(LightningLoggerBase):\r\n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\r\n version=None, project=None, tags=None, experiment=None):\r\n .\r\n .\r\n .\r\n self._experiment = experiment\r\n```\r\nThen I can do this:\r\n\r\n```\r\nexperiment = wandb.init(.......)\r\nwandb_logger = WandbLogger(experiment=experiment)\r\n```\r\nI made this change locally, however, I wasn't sure if this was something you also wanted to implement as well. It works for me.\r\n\r\n# Another small note\r\n\r\nIn the `WandbLogger.log_metrics` function, I would change:\r\n\r\n`self.experiment.history.add(metrics)` --> `self.experiment.log(metrics)`\n", "before_files": [{"content": "import os\n\ntry:\n import wandb\nexcept ImportError:\n raise ImportError('Missing wandb package.')\n\nfrom .base import LightningLoggerBase, rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Logger for W&B.\n\n Args:\n name (str): display name for the run.\n save_dir (str): path where data is saved.\n offline (bool): run offline (data can be streamed later to wandb servers).\n id or version (str): sets the version, mainly used to resume a previous run.\n anonymous (bool): enables or explicitly disables anonymous logging.\n project (str): the name of the project to which this run will belong.\n tags (list of str): tags associated with this run.\n\n Example\n --------\n .. 
code-block:: python\n\n from pytorch_lightning.logging import WandbLogger\n from pytorch_lightning import Trainer\n\n wandb_logger = WandbLogger()\n trainer = Trainer(logger=wandb_logger)\n \"\"\"\n\n def __init__(self, name=None, save_dir=None, offline=False, id=None, anonymous=False,\n version=None, project=None, tags=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = \"allow\" if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = None\n self._offline = offline\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # cannot be pickled\n state['_experiment'] = None\n # args needed to reload correct experiment\n state['_id'] = self.experiment.id\n return state\n\n @property\n def experiment(self):\n r\"\"\"\n\n Actual wandb object. To use wandb features do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ[\"WANDB_MODE\"] = \"dryrun\"\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n id=self._id, resume=\"allow\", tags=self._tags)\n return self._experiment\n\n def watch(self, model, log=\"gradients\", log_freq=100):\n wandb.watch(model, log, log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params):\n self.experiment.config.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics, step=None):\n metrics[\"global_step\"] = step\n self.experiment.history.add(metrics)\n\n def save(self):\n pass\n\n @rank_zero_only\n def finalize(self, status='success'):\n try:\n exit_code = 0 if status == 'success' else 1\n wandb.join(exit_code)\n except TypeError:\n wandb.join()\n\n @property\n def name(self):\n return self.experiment.project_name()\n\n @property\n def version(self):\n return self.experiment.id\n", "path": "pytorch_lightning/logging/wandb.py"}]}
1,706
263
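The golden diff for Lightning-AI__pytorch-lightning-720 adds an `experiment` argument to WandbLogger and swaps `history.add` for `experiment.log`. A minimal sketch of the usage the issue asked for, valid only once that patch is applied; the project name is a placeholder, and the import path follows this row's pytorch_lightning/logging/wandb.py layout:

```python
import wandb
from pytorch_lightning.logging import WandbLogger

run = wandb.init(project="my-project")      # experiment created outside the logger
logger = WandbLogger(experiment=run)        # reuse it; no second run is spawned
logger.log_metrics({"loss": 0.25}, step=1)  # forwarded to run.log(...) by the patch
```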
gh_patches_debug_26619
rasdani/github-patches
git_diff
benoitc__gunicorn-826
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Upcoming Tornado change breaks gunicorn.workers.gtornado The next release of Tornado (will be 4.0 when released, although the current master branch hasn't been updated to change all mentions of 3.3 to 4.0) makes some major changes to the HTTP internals and breaks gunicorn's monkey-patching of HTTPConnection.finish. Fortunately, there is now a cleaner way to do the tracking that gunicorn wants to do here, since the interface between HTTPServer and Application is more formally defined by the HTTPServerConnectionDelegate and HTTPMessageDelegate interfaces, so you should be able to wrap the Application (which implements/subclasses HTTPServerConnectionDelegate) and the HTTPMessageDelegate its start_request method returns. </issue> <code> [start of gunicorn/workers/gtornado.py] 1 # -*- coding: utf-8 - 2 # 3 # This file is part of gunicorn released under the MIT license. 4 # See the NOTICE for more information. 5 6 import os 7 import sys 8 9 try: 10 import tornado.web 11 except ImportError: 12 raise RuntimeError("You need tornado installed to use this worker.") 13 import tornado.httpserver 14 from tornado.ioloop import IOLoop, PeriodicCallback 15 from tornado.wsgi import WSGIContainer 16 from gunicorn.workers.base import Worker 17 from gunicorn import __version__ as gversion 18 19 20 class TornadoWorker(Worker): 21 22 @classmethod 23 def setup(cls): 24 web = sys.modules.pop("tornado.web") 25 old_clear = web.RequestHandler.clear 26 27 def clear(self): 28 old_clear(self) 29 self._headers["Server"] += " (Gunicorn/%s)" % gversion 30 web.RequestHandler.clear = clear 31 sys.modules["tornado.web"] = web 32 33 def handle_exit(self, sig, frame): 34 if self.alive: 35 super(TornadoWorker, self).handle_exit(sig, frame) 36 self.stop() 37 38 def handle_request(self): 39 self.nr += 1 40 if self.alive and self.nr >= self.max_requests: 41 self.alive = False 42 self.log.info("Autorestarting worker after current request.") 43 self.stop() 44 45 def watchdog(self): 46 if self.alive: 47 self.notify() 48 49 if self.ppid != os.getppid(): 50 self.log.info("Parent changed, shutting down: %s", self) 51 self.stop() 52 53 def run(self): 54 self.ioloop = IOLoop.instance() 55 self.alive = True 56 PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start() 57 58 # Assume the app is a WSGI callable if its not an 59 # instance of tornado.web.Application or is an 60 # instance of tornado.wsgi.WSGIApplication 61 app = self.wsgi 62 if not isinstance(app, tornado.web.Application) or \ 63 isinstance(app, tornado.wsgi.WSGIApplication): 64 app = WSGIContainer(app) 65 66 # Monkey-patching HTTPConnection.finish to count the 67 # number of requests being handled by Tornado. This 68 # will help gunicorn shutdown the worker if max_requests 69 # is exceeded. 
70 httpserver = sys.modules["tornado.httpserver"] 71 old_connection_finish = httpserver.HTTPConnection.finish 72 73 def finish(other): 74 self.handle_request() 75 old_connection_finish(other) 76 httpserver.HTTPConnection.finish = finish 77 sys.modules["tornado.httpserver"] = httpserver 78 79 if self.cfg.is_ssl: 80 server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop, 81 ssl_options=self.cfg.ssl_options) 82 else: 83 server = tornado.httpserver.HTTPServer(app, 84 io_loop=self.ioloop) 85 86 self.server = server 87 88 for s in self.sockets: 89 s.setblocking(0) 90 if hasattr(server, "add_socket"): # tornado > 2.0 91 server.add_socket(s) 92 elif hasattr(server, "_sockets"): # tornado 2.0 93 server._sockets[s.fileno()] = s 94 95 server.no_keep_alive = self.cfg.keepalive <= 0 96 server.start(num_processes=1) 97 98 self.ioloop.start() 99 100 def stop(self): 101 if hasattr(self, 'server'): 102 try: 103 self.server.stop() 104 except Exception: 105 pass 106 PeriodicCallback(self.stop_ioloop, 1000, io_loop=self.ioloop).start() 107 108 def stop_ioloop(self): 109 if not self.ioloop._callbacks and len(self.ioloop._timeouts) <= 1: 110 self.ioloop.stop() 111 [end of gunicorn/workers/gtornado.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gunicorn/workers/gtornado.py b/gunicorn/workers/gtornado.py --- a/gunicorn/workers/gtornado.py +++ b/gunicorn/workers/gtornado.py @@ -68,20 +68,31 @@ # will help gunicorn shutdown the worker if max_requests # is exceeded. httpserver = sys.modules["tornado.httpserver"] - old_connection_finish = httpserver.HTTPConnection.finish + if hasattr(httpserver, 'HTTPConnection'): + old_connection_finish = httpserver.HTTPConnection.finish - def finish(other): - self.handle_request() - old_connection_finish(other) - httpserver.HTTPConnection.finish = finish - sys.modules["tornado.httpserver"] = httpserver + def finish(other): + self.handle_request() + old_connection_finish(other) + httpserver.HTTPConnection.finish = finish + sys.modules["tornado.httpserver"] = httpserver + + server_class = tornado.httpserver.HTTPServer + else: + + class _HTTPServer(tornado.httpserver.HTTPServer): + + def on_close(instance, server_conn): + self.handle_request() + super(_HTTPServer, instance).on_close(server_conn) + + server_class = _HTTPServer if self.cfg.is_ssl: - server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop, + server = server_class(app, io_loop=self.ioloop, ssl_options=self.cfg.ssl_options) else: - server = tornado.httpserver.HTTPServer(app, - io_loop=self.ioloop) + server = server_class(app, io_loop=self.ioloop) self.server = server
{"golden_diff": "diff --git a/gunicorn/workers/gtornado.py b/gunicorn/workers/gtornado.py\n--- a/gunicorn/workers/gtornado.py\n+++ b/gunicorn/workers/gtornado.py\n@@ -68,20 +68,31 @@\n # will help gunicorn shutdown the worker if max_requests\n # is exceeded.\n httpserver = sys.modules[\"tornado.httpserver\"]\n- old_connection_finish = httpserver.HTTPConnection.finish\n+ if hasattr(httpserver, 'HTTPConnection'):\n+ old_connection_finish = httpserver.HTTPConnection.finish\n \n- def finish(other):\n- self.handle_request()\n- old_connection_finish(other)\n- httpserver.HTTPConnection.finish = finish\n- sys.modules[\"tornado.httpserver\"] = httpserver\n+ def finish(other):\n+ self.handle_request()\n+ old_connection_finish(other)\n+ httpserver.HTTPConnection.finish = finish\n+ sys.modules[\"tornado.httpserver\"] = httpserver\n+\n+ server_class = tornado.httpserver.HTTPServer\n+ else:\n+\n+ class _HTTPServer(tornado.httpserver.HTTPServer):\n+\n+ def on_close(instance, server_conn):\n+ self.handle_request()\n+ super(_HTTPServer, instance).on_close(server_conn)\n+\n+ server_class = _HTTPServer\n \n if self.cfg.is_ssl:\n- server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,\n+ server = server_class(app, io_loop=self.ioloop,\n ssl_options=self.cfg.ssl_options)\n else:\n- server = tornado.httpserver.HTTPServer(app,\n- io_loop=self.ioloop)\n+ server = server_class(app, io_loop=self.ioloop)\n \n self.server = server\n", "issue": "Upcoming Tornado change breaks gunicorn.workers.gtornado\nThe next release of Tornado (will be 4.0 when released, although the current master branch hasn't been updated to change all mentions of 3.3 to 4.0) makes some major changes to the HTTP internals and breaks gunicorn's monkey-patching of HTTPConnection.finish. Fortunately, there is now a cleaner way to do the tracking that gunicorn wants to do here, since the interface between HTTPServer and Application is more formally defined by the HTTPServerConnectionDelegate and HTTPMessageDelegate interfaces, so you should be able to wrap the Application (which implements/subclasses HTTPServerConnectionDelegate) and the HTTPMessageDelegate its start_request method returns. 
\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\ntry:\n import tornado.web\nexcept ImportError:\n raise RuntimeError(\"You need tornado installed to use this worker.\")\nimport tornado.httpserver\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado.wsgi import WSGIContainer\nfrom gunicorn.workers.base import Worker\nfrom gunicorn import __version__ as gversion\n\n\nclass TornadoWorker(Worker):\n\n @classmethod\n def setup(cls):\n web = sys.modules.pop(\"tornado.web\")\n old_clear = web.RequestHandler.clear\n\n def clear(self):\n old_clear(self)\n self._headers[\"Server\"] += \" (Gunicorn/%s)\" % gversion\n web.RequestHandler.clear = clear\n sys.modules[\"tornado.web\"] = web\n\n def handle_exit(self, sig, frame):\n if self.alive:\n super(TornadoWorker, self).handle_exit(sig, frame)\n self.stop()\n\n def handle_request(self):\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.alive = False\n self.log.info(\"Autorestarting worker after current request.\")\n self.stop()\n\n def watchdog(self):\n if self.alive:\n self.notify()\n\n if self.ppid != os.getppid():\n self.log.info(\"Parent changed, shutting down: %s\", self)\n self.stop()\n\n def run(self):\n self.ioloop = IOLoop.instance()\n self.alive = True\n PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()\n\n # Assume the app is a WSGI callable if its not an\n # instance of tornado.web.Application or is an\n # instance of tornado.wsgi.WSGIApplication\n app = self.wsgi\n if not isinstance(app, tornado.web.Application) or \\\n isinstance(app, tornado.wsgi.WSGIApplication):\n app = WSGIContainer(app)\n\n # Monkey-patching HTTPConnection.finish to count the\n # number of requests being handled by Tornado. This\n # will help gunicorn shutdown the worker if max_requests\n # is exceeded.\n httpserver = sys.modules[\"tornado.httpserver\"]\n old_connection_finish = httpserver.HTTPConnection.finish\n\n def finish(other):\n self.handle_request()\n old_connection_finish(other)\n httpserver.HTTPConnection.finish = finish\n sys.modules[\"tornado.httpserver\"] = httpserver\n\n if self.cfg.is_ssl:\n server = tornado.httpserver.HTTPServer(app, io_loop=self.ioloop,\n ssl_options=self.cfg.ssl_options)\n else:\n server = tornado.httpserver.HTTPServer(app,\n io_loop=self.ioloop)\n\n self.server = server\n\n for s in self.sockets:\n s.setblocking(0)\n if hasattr(server, \"add_socket\"): # tornado > 2.0\n server.add_socket(s)\n elif hasattr(server, \"_sockets\"): # tornado 2.0\n server._sockets[s.fileno()] = s\n\n server.no_keep_alive = self.cfg.keepalive <= 0\n server.start(num_processes=1)\n\n self.ioloop.start()\n\n def stop(self):\n if hasattr(self, 'server'):\n try:\n self.server.stop()\n except Exception:\n pass\n PeriodicCallback(self.stop_ioloop, 1000, io_loop=self.ioloop).start()\n\n def stop_ioloop(self):\n if not self.ioloop._callbacks and len(self.ioloop._timeouts) <= 1:\n self.ioloop.stop()\n", "path": "gunicorn/workers/gtornado.py"}]}
1,735
374
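The benoitc__gunicorn-826 fix stops monkey-patching HTTPConnection.finish on Tornado >= 4 and instead subclasses HTTPServer, whose on_close(server_conn) hook (inherited from HTTPServerConnectionDelegate) fires once per closed connection. The same pattern as a standalone sketch; the factory function and callback wiring are illustrative additions, not part of the patch:

```python
import tornado.httpserver

def make_counting_server(app, on_request_done, **server_kwargs):
    """Count handled connections via the delegate hook (Tornado >= 4)."""

    class CountingHTTPServer(tornado.httpserver.HTTPServer):
        def on_close(self, server_conn):
            on_request_done()             # e.g. bump a max_requests counter
            super().on_close(server_conn)

    return CountingHTTPServer(app, **server_kwargs)
```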
gh_patches_debug_61381
rasdani/github-patches
git_diff
tensorflow__addons-1213
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Proposal: Upload the nightlies at each commit on the master branch We already build the wheels already anyway. It's just missing the push to pypi. Yes/No? </issue> <code> [start of setup.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """TensorFlow Addons. 16 17 TensorFlow Addons is a repository of contributions that conform to well- 18 established API patterns, but implement new functionality not available 19 in core TensorFlow. TensorFlow natively supports a large number of 20 operators, layers, metrics, losses, and optimizers. However, in a fast 21 moving field like ML, there are many interesting new developments that 22 cannot be integrated into core TensorFlow (because their broad 23 applicability is not yet clear, or it is mostly used by a smaller subset 24 of the community). 25 """ 26 27 import os 28 import sys 29 30 from datetime import datetime 31 from setuptools import find_packages 32 from setuptools import setup 33 from setuptools.dist import Distribution 34 from setuptools import Extension 35 36 DOCLINES = __doc__.split("\n") 37 38 TFA_NIGHTLY = "tfa-nightly" 39 TFA_RELEASE = "tensorflow-addons" 40 41 if "--nightly" in sys.argv: 42 project_name = TFA_NIGHTLY 43 nightly_idx = sys.argv.index("--nightly") 44 sys.argv.pop(nightly_idx) 45 else: 46 project_name = TFA_RELEASE 47 48 # Version 49 version = {} 50 base_dir = os.path.dirname(os.path.abspath(__file__)) 51 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp: 52 exec(fp.read(), version) 53 54 if project_name == TFA_NIGHTLY: 55 version["__version__"] += datetime.strftime(datetime.today(), "%Y%m%d") 56 57 with open("requirements.txt") as f: 58 required_pkgs = f.read().splitlines() 59 60 # Manylinux2010 requires a patch for platlib 61 if ( 62 sys.platform.startswith("linux") 63 and os.environ.get("TF_ADDONS_NO_BUILD", "0") == "0" 64 ): 65 ext_modules = [Extension("_foo", ["stub.cc"])] 66 else: 67 ext_modules = [] 68 69 70 class BinaryDistribution(Distribution): 71 """This class is needed in order to create OS specific wheels.""" 72 73 def has_ext_modules(self): 74 return True 75 76 77 setup( 78 name=project_name, 79 version=version["__version__"], 80 description=DOCLINES[0], 81 long_description="\n".join(DOCLINES[2:]), 82 author="Google Inc.", 83 author_email="[email protected]", 84 packages=find_packages(), 85 ext_modules=ext_modules, 86 install_requires=required_pkgs, 87 include_package_data=True, 88 zip_safe=False, 89 distclass=BinaryDistribution, 90 classifiers=[ 91 "Development Status :: 4 - Beta", 92 "Intended Audience :: Developers", 93 "Intended Audience :: Education", 94 "Intended Audience :: Science/Research", 95 "License :: OSI Approved :: Apache Software License", 96 "Programming Language :: Python :: 3.5", 97 
"Programming Language :: Python :: 3.6", 98 "Programming Language :: Python :: 3.7", 99 "Topic :: Scientific/Engineering :: Mathematics", 100 "Topic :: Software Development :: Libraries :: Python Modules", 101 "Topic :: Software Development :: Libraries", 102 ], 103 license="Apache 2.0", 104 keywords="tensorflow addons machine learning", 105 ) 106 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ exec(fp.read(), version) if project_name == TFA_NIGHTLY: - version["__version__"] += datetime.strftime(datetime.today(), "%Y%m%d") + version["__version__"] += datetime.now().strftime("%Y%m%d%H%M%S") with open("requirements.txt") as f: required_pkgs = f.read().splitlines()
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,7 @@\n exec(fp.read(), version)\n \n if project_name == TFA_NIGHTLY:\n- version[\"__version__\"] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n+ version[\"__version__\"] += datetime.now().strftime(\"%Y%m%d%H%M%S\")\n \n with open(\"requirements.txt\") as f:\n required_pkgs = f.read().splitlines()\n", "issue": "Proposal: Upload the nightlies at each commit on the master branch\nWe already build the wheels already anyway. It's just missing the push to pypi. Yes/No?\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\nTFA_NIGHTLY = \"tfa-nightly\"\nTFA_RELEASE = \"tensorflow-addons\"\n\nif \"--nightly\" in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index(\"--nightly\")\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\nif project_name == TFA_NIGHTLY:\n version[\"__version__\"] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\nwith open(\"requirements.txt\") as f:\n required_pkgs = f.read().splitlines()\n\n# Manylinux2010 requires a patch for platlib\nif (\n sys.platform.startswith(\"linux\")\n and os.environ.get(\"TF_ADDONS_NO_BUILD\", \"0\") == \"0\"\n):\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\nelse:\n ext_modules = []\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=ext_modules,\n install_requires=required_pkgs,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n 
\"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}]}
1,567
112
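The tensorflow__addons-1213 change is a one-liner: nightly versions get a second-resolution timestamp instead of a date stamp, so CI can push more than one tfa-nightly wheel per day without a version collision on PyPI. Distilled below, with a stand-in for tensorflow_addons/version.py:

```python
from datetime import datetime

version = {"__version__": "0.9.0.dev"}  # placeholder base version
version["__version__"] += datetime.now().strftime("%Y%m%d%H%M%S")
print(version["__version__"])  # e.g. 0.9.0.dev20200301123456
```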
gh_patches_debug_34411
rasdani/github-patches
git_diff
ESMCI__cime-3079
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Branch a single instance case from a multi-instance case When a multi-instance CAM forecast fails, I want to use one of the failed instances as an exact restart in a single instance case, to speed debugging. Building it as hybrid or startup won't work because for CAM those are not exact restarts; they use the CAM initial file I first tried building a single instance branch run with the multi-instance as the refcase. That branched the entire multi-instance run. Then I tried copying all of the restart files for 1 instance into a new directory, "Restarts", with instance numbers removed from the file names. I built the single instance case with Restarts as the RUN_REFDIR. It built, but when I tried to run it it complained about a mozart file already existing in the run directory. I believe that an earlier stage of the submit process created that file, so I'm stumped about how to fix this problem. I've played with GET_REFCASE and CONTINUE_RUN and other things, but have not found the magic combination. Am I missing something, or is this a new capability that hasn't been implemented? $CASEROOT = /gpfs/fs1/work/raeder/Exp/Debug_lwdn4 $RUNDIR = /gpfs/fs1/scratch/raeder/Debug_lwdn4/run $CESMROOT = /glade/work/raeder/Models/cesm2_2_maint-5.6 Thanks Kevin </issue> <code> [start of scripts/lib/CIME/Servers/wget.py] 1 """ 2 WGET Server class. Interact with a server using WGET protocol 3 """ 4 # pylint: disable=super-init-not-called 5 from CIME.XML.standard_module_setup import * 6 from CIME.Servers.generic_server import GenericServer 7 logger = logging.getLogger(__name__) 8 9 class WGET(GenericServer): 10 def __init__(self, address, user='', passwd=''): 11 self._args = '' 12 if user: 13 self._args += "--user {} ".format(user) 14 if passwd: 15 self._args += "--password {} ".format(passwd) 16 self._server_loc = address 17 18 err = run_cmd("wget {} --spider {}".format(self._args, address))[0] 19 expect(err == 0,"Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .") 20 21 22 def fileexists(self, rel_path): 23 full_url = os.path.join(self._server_loc, rel_path) 24 stat, out, err = run_cmd("wget {} --spider {}".format(self._args, full_url)) 25 if (stat != 0): 26 logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8'))) 27 return False 28 return True 29 30 def getfile(self, rel_path, full_path): 31 full_url = os.path.join(self._server_loc, rel_path) 32 stat, output, errput = \ 33 run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path)) 34 if (stat != 0): 35 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) 36 # wget puts an empty file if it fails. 37 try: 38 os.remove(full_path) 39 except OSError: 40 pass 41 return False 42 else: 43 logging.info("SUCCESS\n") 44 return True 45 46 def getdirectory(self, rel_path, full_path): 47 full_url = os.path.join(self._server_loc, rel_path) 48 stat, output, errput = \ 49 run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path) 50 logger.debug(output) 51 logger.debug(errput) 52 if (stat != 0): 53 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) 54 # wget puts an empty file if it fails. 
55 try: 56 os.remove(full_path) 57 except OSError: 58 pass 59 return False 60 else: 61 logging.info("SUCCESS\n") 62 return True 63 [end of scripts/lib/CIME/Servers/wget.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py --- a/scripts/lib/CIME/Servers/wget.py +++ b/scripts/lib/CIME/Servers/wget.py @@ -15,9 +15,9 @@ self._args += "--password {} ".format(passwd) self._server_loc = address - err = run_cmd("wget {} --spider {}".format(self._args, address))[0] - expect(err == 0,"Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .") - + cmd = "wget {} --no-check-certificate --spider {}".format(self._args, address) + err, output, _ = run_cmd(cmd, combine_output=True) + expect(err == 0,"Could not connect to repo via '{}'\nThis is most likely either a proxy, or network issue.\nOutput:\n{}".format(cmd, output.encode('utf-8'))) def fileexists(self, rel_path): full_url = os.path.join(self._server_loc, rel_path) @@ -30,9 +30,9 @@ def getfile(self, rel_path, full_path): full_url = os.path.join(self._server_loc, rel_path) stat, output, errput = \ - run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path)) + run_cmd("wget {} {} -nc --no-check-certificate --output-document {}".format(self._args, full_url, full_path)) if (stat != 0): - logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) + logging.warning("wget failed with output: {} and errput {}\n".format(output.encode('utf-8'), errput.encode('utf-8'))) # wget puts an empty file if it fails. try: os.remove(full_path) @@ -46,7 +46,7 @@ def getdirectory(self, rel_path, full_path): full_url = os.path.join(self._server_loc, rel_path) stat, output, errput = \ - run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path) + run_cmd("wget {} {} -r -N --no-check-certificate --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path) logger.debug(output) logger.debug(errput) if (stat != 0):
{"golden_diff": "diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py\n--- a/scripts/lib/CIME/Servers/wget.py\n+++ b/scripts/lib/CIME/Servers/wget.py\n@@ -15,9 +15,9 @@\n self._args += \"--password {} \".format(passwd)\n self._server_loc = address\n \n- err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n- expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n-\n+ cmd = \"wget {} --no-check-certificate --spider {}\".format(self._args, address)\n+ err, output, _ = run_cmd(cmd, combine_output=True)\n+ expect(err == 0,\"Could not connect to repo via '{}'\\nThis is most likely either a proxy, or network issue.\\nOutput:\\n{}\".format(cmd, output.encode('utf-8')))\n \n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n@@ -30,9 +30,9 @@\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n- run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n+ run_cmd(\"wget {} {} -nc --no-check-certificate --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n- logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n+ logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output.encode('utf-8'), errput.encode('utf-8')))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n@@ -46,7 +46,7 @@\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n- run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n+ run_cmd(\"wget {} {} -r -N --no-check-certificate --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n logger.debug(output)\n logger.debug(errput)\n if (stat != 0):\n", "issue": "Branch a single instance case from a multi-instance case\nWhen a multi-instance CAM forecast fails, I want to use one of the failed instances\r\nas an exact restart in a single instance case, to speed debugging.\r\nBuilding it as hybrid or startup won't work because for CAM \r\nthose are not exact restarts; they use the CAM initial file\r\nI first tried building a single instance branch run with the multi-instance\r\nas the refcase. That branched the entire multi-instance run.\r\n\r\nThen I tried copying all of the restart files for 1 instance into a new directory, \r\n\"Restarts\", with instance numbers removed from the file names.\r\nI built the single instance case with Restarts as the RUN_REFDIR.\r\nIt built, but when I tried to run it it complained about a mozart file\r\nalready existing in the run directory. I believe that an earlier stage\r\nof the submit process created that file, so I'm stumped about how\r\nto fix this problem. I've played with GET_REFCASE and CONTINUE_RUN\r\nand other things, but have not found the magic combination.\r\nAm I missing something, or is this a new capability that hasn't been implemented?\r\n\r\n$CASEROOT = /gpfs/fs1/work/raeder/Exp/Debug_lwdn4\r\n$RUNDIR = /gpfs/fs1/scratch/raeder/Debug_lwdn4/run\r\n$CESMROOT = /glade/work/raeder/Models/cesm2_2_maint-5.6\r\n\r\nThanks\r\nKevin\r\n\n", "before_files": [{"content": "\"\"\"\nWGET Server class. 
Interact with a server using WGET protocol\n\"\"\"\n# pylint: disable=super-init-not-called\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.Servers.generic_server import GenericServer\nlogger = logging.getLogger(__name__)\n\nclass WGET(GenericServer):\n def __init__(self, address, user='', passwd=''):\n self._args = ''\n if user:\n self._args += \"--user {} \".format(user)\n if passwd:\n self._args += \"--password {} \".format(passwd)\n self._server_loc = address\n\n err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n\n\n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, out, err = run_cmd(\"wget {} --spider {}\".format(self._args, full_url))\n if (stat != 0):\n logging.warning(\"FAIL: Repo '{}' does not have file '{}'\\nReason:{}\\n{}\\n\".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8')))\n return False\n return True\n\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n logger.debug(output)\n logger.debug(errput)\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n", "path": "scripts/lib/CIME/Servers/wget.py"}]}
1,561
573
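The ESMCI__cime-3079 patch adds --no-check-certificate to every wget call and surfaces wget's combined output when the initial --spider probe fails. Below, the probe rebuilt on subprocess instead of CIME's run_cmd helper (that substitution and the function name are editorial assumptions; the wget flags are real, and a wget binary must be on PATH):

```python
import subprocess

def repo_reachable(address: str, extra_args: str = "") -> bool:
    # --spider checks existence without downloading a file;
    # --no-check-certificate tolerates servers with self-signed TLS certs
    cmd = f"wget {extra_args} --no-check-certificate --spider {address}"
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if proc.returncode != 0:
        print(f"Could not connect via '{cmd}'\nOutput:\n{proc.stdout}{proc.stderr}")
    return proc.returncode == 0
```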
gh_patches_debug_14006
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3341
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider northern_california_breweries is broken During the global build at 2021-06-23-14-42-18, spider **northern_california_breweries** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/northern_california_breweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson)) </issue> <code> [start of locations/spiders/northern_california_breweries.py] 1 # -*- coding: utf-8 -*- 2 import scrapy 3 from locations.items import GeojsonPointItem 4 import json 5 import re 6 7 class NorthernCaliforniaBreweriesSpider(scrapy.Spider): 8 name = "northern_california_breweries" 9 allowed_domains = ["projects.sfchronicle.com"] 10 start_urls = ( 11 'http://projects.sfchronicle.com/2017/brewery-map/', 12 ) 13 14 def parse(self, response): 15 beerData = response.xpath("//*[text()[contains(.,'beerData')]]").extract_first() 16 matches = re.search(r"var beerData = (\[(.*)\])", beerData) 17 jsonData = matches.group(0).replace("var beerData = ","") 18 breweryList = json.loads(jsonData) 19 20 for item in breweryList: 21 yield GeojsonPointItem( 22 ref=item.get('Brewery'), 23 lat=float(item.get('Latitude')), 24 lon=float(item.get('Longitude')), 25 addr_full=item.get('Address'), 26 city=item.get('City'), 27 state="CA", 28 website=item.get('Website'), 29 ) 30 [end of locations/spiders/northern_california_breweries.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/northern_california_breweries.py b/locations/spiders/northern_california_breweries.py --- a/locations/spiders/northern_california_breweries.py +++ b/locations/spiders/northern_california_breweries.py @@ -18,10 +18,19 @@ breweryList = json.loads(jsonData) for item in breweryList: + latitude = None + longitude = None + + if item.get('Latitude') is not None: + latitude = float(item.get('Latitude')) + + if item.get('Longitude') is not None: + longitude = float(item.get('Longitude')) + yield GeojsonPointItem( ref=item.get('Brewery'), - lat=float(item.get('Latitude')), - lon=float(item.get('Longitude')), + lat=latitude, + lon=longitude, addr_full=item.get('Address'), city=item.get('City'), state="CA",
{"golden_diff": "diff --git a/locations/spiders/northern_california_breweries.py b/locations/spiders/northern_california_breweries.py\n--- a/locations/spiders/northern_california_breweries.py\n+++ b/locations/spiders/northern_california_breweries.py\n@@ -18,10 +18,19 @@\n breweryList = json.loads(jsonData)\n \n for item in breweryList:\n+ latitude = None\n+ longitude = None\n+\n+ if item.get('Latitude') is not None:\n+ latitude = float(item.get('Latitude'))\n+\n+ if item.get('Longitude') is not None:\n+ longitude = float(item.get('Longitude'))\n+ \n yield GeojsonPointItem(\n ref=item.get('Brewery'),\n- lat=float(item.get('Latitude')),\n- lon=float(item.get('Longitude')),\n+ lat=latitude,\n+ lon=longitude,\n addr_full=item.get('Address'),\n city=item.get('City'),\n state=\"CA\",\n", "issue": "Spider northern_california_breweries is broken\nDuring the global build at 2021-06-23-14-42-18, spider **northern_california_breweries** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/logs/northern_california_breweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-23-14-42-18/output/northern_california_breweries.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\nimport re\n\nclass NorthernCaliforniaBreweriesSpider(scrapy.Spider):\n name = \"northern_california_breweries\"\n allowed_domains = [\"projects.sfchronicle.com\"]\n start_urls = (\n 'http://projects.sfchronicle.com/2017/brewery-map/',\n )\n\n def parse(self, response):\n beerData = response.xpath(\"//*[text()[contains(.,'beerData')]]\").extract_first()\n matches = re.search(r\"var beerData = (\\[(.*)\\])\", beerData)\n jsonData = matches.group(0).replace(\"var beerData = \",\"\")\n breweryList = json.loads(jsonData)\n\n for item in breweryList:\n yield GeojsonPointItem(\n ref=item.get('Brewery'),\n lat=float(item.get('Latitude')),\n lon=float(item.get('Longitude')),\n addr_full=item.get('Address'),\n city=item.get('City'),\n state=\"CA\",\n website=item.get('Website'),\n )\n", "path": "locations/spiders/northern_california_breweries.py"}]}
1,040
222
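The alltheplaces fix is a null guard: some breweries in the scraped JSON carry no Latitude/Longitude, and float(None) raised, zeroing out the whole spider run. The guard distilled into a reusable helper; the helper itself is editorial, not code from the patch:

```python
def safe_float(value):
    """float(value), or None when the upstream JSON omits the field."""
    return float(value) if value is not None else None

assert safe_float("38.58") == 38.58
assert safe_float(None) is None  # the case that produced 0 features / 1 error
```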
gh_patches_debug_59836
rasdani/github-patches
git_diff
angr__angr-4105
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Duplicate member docs on subclasses ### Description e.g. the documentation on SimCC's members is also present on SimCCUsercall. This is a huge problem considering that the api docs page is already fucking gigantic, this is just making it multiplicatively bigger. ### Steps to reproduce the bug _No response_ ### Environment _No response_ ### Additional context _No response_ </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # For the full list of built-in configuration values, see the documentation: 4 # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 6 import datetime 7 8 # -- Project information ----------------------------------------------------- 9 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 10 11 project = "angr" 12 project_copyright = f"{datetime.datetime.now().year}, The angr Project contributors" 13 author = "The angr Project" 14 15 # -- General configuration --------------------------------------------------- 16 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 17 18 extensions = [ 19 "sphinx.ext.autodoc", 20 "sphinx.ext.autosectionlabel", 21 "sphinx.ext.autosummary", 22 "sphinx.ext.coverage", 23 "sphinx.ext.intersphinx", 24 "sphinx.ext.napoleon", 25 "sphinx.ext.todo", 26 "sphinx.ext.viewcode", 27 "sphinx_autodoc_typehints", 28 "myst_parser", 29 ] 30 31 templates_path = ["_templates"] 32 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 33 34 # -- Options for autodoc ----------------------------------------------------- 35 # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration 36 autoclass_content = "class" 37 autodoc_default_options = { 38 "members": True, 39 "member-order": "bysource", 40 "inherited-members": True, 41 "show-inheritance": True, 42 "special-members": "__init__", 43 "undoc-members": True, 44 } 45 autodoc_inherit_docstrings = True 46 autodoc_typehints = "both" 47 48 # -- Options for coverage ---------------------------------------------------- 49 # https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html 50 coverage_write_headline = False 51 52 coverage_ignore_pyobjects = [ 53 "angr.analyses.decompiler.structured_codegen.c.StructuredCodeGenerator", # Alias to CStructuredCodeGenerator 54 "angr.sim_type.SimTypeFixedSizeArray", # Alias to SimTypeArray 55 ] 56 57 # -- Options for intersphinx ------------------------------------------------- 58 # https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html 59 intersphinx_mapping = { 60 "python": ("https://docs.python.org/3", None), 61 "ailment": ("https://docs.angr.io/projects/ailment/en/latest/", None), 62 "archinfo": ("https://docs.angr.io/projects/archinfo/en/latest/", None), 63 "claripy": ("https://docs.angr.io/projects/claripy/en/latest/", None), 64 "cle": ("https://docs.angr.io/projects/cle/en/latest/", None), 65 "pypcode": ("https://docs.angr.io/projects/pypcode/en/latest/", None), 66 "pyvex": ("https://docs.angr.io/projects/pyvex/en/latest/", None), 67 } 68 69 # -- Options for todos ------------------------------------------------------- 70 # https://www.sphinx-doc.org/en/master/usage/extensions/todo.html 71 todo_include_todos = True 72 73 # -- Options for HTML output ------------------------------------------------- 74 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 75 
76 html_theme = "furo" 77 html_static_path = ["_static"] 78 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -37,7 +37,6 @@ autodoc_default_options = { "members": True, "member-order": "bysource", - "inherited-members": True, "show-inheritance": True, "special-members": "__init__", "undoc-members": True,
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -37,7 +37,6 @@\n autodoc_default_options = {\n \"members\": True,\n \"member-order\": \"bysource\",\n- \"inherited-members\": True,\n \"show-inheritance\": True,\n \"special-members\": \"__init__\",\n \"undoc-members\": True,\n", "issue": "Duplicate member docs on subclasses\n### Description\n\ne.g. the documentation on SimCC's members is also present on SimCCUsercall. This is a huge problem considering that the api docs page is already fucking gigantic, this is just making it multiplicatively bigger.\n\n### Steps to reproduce the bug\n\n_No response_\n\n### Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nimport datetime\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = \"angr\"\nproject_copyright = f\"{datetime.datetime.now().year}, The angr Project contributors\"\nauthor = \"The angr Project\"\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_autodoc_typehints\",\n \"myst_parser\",\n]\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- Options for autodoc -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration\nautoclass_content = \"class\"\nautodoc_default_options = {\n \"members\": True,\n \"member-order\": \"bysource\",\n \"inherited-members\": True,\n \"show-inheritance\": True,\n \"special-members\": \"__init__\",\n \"undoc-members\": True,\n}\nautodoc_inherit_docstrings = True\nautodoc_typehints = \"both\"\n\n# -- Options for coverage ----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html\ncoverage_write_headline = False\n\ncoverage_ignore_pyobjects = [\n \"angr.analyses.decompiler.structured_codegen.c.StructuredCodeGenerator\", # Alias to CStructuredCodeGenerator\n \"angr.sim_type.SimTypeFixedSizeArray\", # Alias to SimTypeArray\n]\n\n# -- Options for intersphinx -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"ailment\": (\"https://docs.angr.io/projects/ailment/en/latest/\", None),\n \"archinfo\": (\"https://docs.angr.io/projects/archinfo/en/latest/\", None),\n \"claripy\": (\"https://docs.angr.io/projects/claripy/en/latest/\", None),\n \"cle\": (\"https://docs.angr.io/projects/cle/en/latest/\", None),\n \"pypcode\": (\"https://docs.angr.io/projects/pypcode/en/latest/\", None),\n \"pyvex\": (\"https://docs.angr.io/projects/pyvex/en/latest/\", None),\n}\n\n# -- Options for todos -------------------------------------------------------\n# 
https://www.sphinx-doc.org/en/master/usage/extensions/todo.html\ntodo_include_todos = True\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"furo\"\nhtml_static_path = [\"_static\"]\n", "path": "docs/conf.py"}]}
1,454
94
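For angr__angr-4105, removing "inherited-members" from autodoc_default_options is the whole fix: Sphinx then documents each member once, on the class that defines it, instead of repeating SimCC's members on SimCCUsercall and every other subclass. The patched options block, for reference:

```python
# docs/conf.py after the patch: subclasses list only their own members
autodoc_default_options = {
    "members": True,
    "member-order": "bysource",
    "show-inheritance": True,   # still links each subclass back to its base
    "special-members": "__init__",
    "undoc-members": True,
}
```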
gh_patches_debug_40226
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-3113
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py] 1 from typing import Optional 2 3 import torch.nn as nn 4 from transformers.models.gpt2.configuration_gpt2 import GPT2Config 5 from transformers.models.gpt2.modeling_gpt2 import GPT2Model 6 7 from ..base import Critic 8 9 10 class GPTCritic(Critic): 11 """ 12 GPT Critic model. 13 14 Args: 15 pretrained (str): Pretrained model name or path. 16 config (GPT2Config): Model config. 17 checkpoint (bool): Enable gradient checkpointing. 18 """ 19 20 def __init__(self, 21 pretrained: Optional[str] = None, 22 config: Optional[GPT2Config] = None, 23 checkpoint: bool = False, 24 **kwargs) -> None: 25 if pretrained is not None: 26 model = GPT2Model.from_pretrained(pretrained) 27 elif config is not None: 28 model = GPT2Model(config) 29 else: 30 model = GPT2Model(GPT2Config()) 31 if checkpoint: 32 model.gradient_checkpointing_enable() 33 value_head = nn.Linear(model.config.n_embd, 1) 34 super().__init__(model, value_head, **kwargs) 35 [end of applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py] [start of applications/ChatGPT/chatgpt/models/opt/opt_critic.py] 1 from typing import Optional 2 3 import torch.nn as nn 4 from transformers.models.opt.configuration_opt import OPTConfig 5 from transformers.models.opt.modeling_opt import OPTModel 6 7 from ..base import Critic 8 9 10 class OPTCritic(Critic): 11 """ 12 OPT Critic model. 13 14 Args: 15 pretrained (str): Pretrained model name or path. 16 config (OPTConfig): Model config. 17 checkpoint (bool): Enable gradient checkpointing. 18 lora_rank (int): Rank of the low-rank approximation. 19 lora_train_bias (str): LoRA bias training mode. 20 """ 21 22 def __init__(self, 23 pretrained: Optional[str] = None, 24 config: Optional[OPTConfig] = None, 25 checkpoint: bool = False, 26 lora_rank: int = 0, 27 lora_train_bias: str = 'none', 28 **kwargs) -> None: 29 if pretrained is not None: 30 model = OPTModel.from_pretrained(pretrained) 31 elif config is not None: 32 model = OPTModel(config) 33 else: 34 model = OPTModel(OPTConfig()) 35 if checkpoint: 36 model.gradient_checkpointing_enable() 37 value_head = nn.Linear(model.config.hidden_size, 1) 38 super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs) 39 [end of applications/ChatGPT/chatgpt/models/opt/opt_critic.py] [start of applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py] 1 from typing import Optional 2 3 from transformers.models.gpt2.configuration_gpt2 import GPT2Config 4 from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel 5 6 from ..base import Actor 7 8 9 class GPTActor(Actor): 10 """ 11 GPT Actor model. 12 13 Args: 14 pretrained (str): Pretrained model name or path. 15 config (GPT2Config): Model config. 16 checkpoint (bool): Enable gradient checkpointing. 
17 """ 18 19 def __init__(self, 20 pretrained: Optional[str] = None, 21 config: Optional[GPT2Config] = None, 22 checkpoint: bool = False) -> None: 23 if pretrained is not None: 24 model = GPT2LMHeadModel.from_pretrained(pretrained) 25 elif config is not None: 26 model = GPT2LMHeadModel(config) 27 else: 28 model = GPT2LMHeadModel(GPT2Config()) 29 if checkpoint: 30 model.gradient_checkpointing_enable() 31 super().__init__(model) 32 [end of applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py --- a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py +++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py @@ -14,12 +14,16 @@ pretrained (str): Pretrained model name or path. config (GPT2Config): Model config. checkpoint (bool): Enable gradient checkpointing. + lora_rank (int): Rank of the LoRa layer. + lora_train_bias (str): Bias training strategy for the LoRa layer. """ def __init__(self, pretrained: Optional[str] = None, config: Optional[GPT2Config] = None, - checkpoint: bool = False) -> None: + checkpoint: bool = False, + lora_rank: int = 0, + lora_train_bias: str = 'none') -> None: if pretrained is not None: model = GPT2LMHeadModel.from_pretrained(pretrained) elif config is not None: @@ -28,4 +32,4 @@ model = GPT2LMHeadModel(GPT2Config()) if checkpoint: model.gradient_checkpointing_enable() - super().__init__(model) + super().__init__(model, lora_rank, lora_train_bias) diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py --- a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py +++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py @@ -15,13 +15,16 @@ pretrained (str): Pretrained model name or path. config (GPT2Config): Model config. checkpoint (bool): Enable gradient checkpointing. + lora_rank (int): Rank of the LO-RA decomposition. + lora_train_bias (str): LoRA bias training mode. """ def __init__(self, pretrained: Optional[str] = None, config: Optional[GPT2Config] = None, checkpoint: bool = False, - **kwargs) -> None: + lora_rank: int = 0, + lora_train_bias: str = 'none') -> None: if pretrained is not None: model = GPT2Model.from_pretrained(pretrained) elif config is not None: @@ -31,4 +34,4 @@ if checkpoint: model.gradient_checkpointing_enable() value_head = nn.Linear(model.config.n_embd, 1) - super().__init__(model, value_head, **kwargs) + super().__init__(model, value_head, lora_rank, lora_train_bias) diff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py --- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py +++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py @@ -34,5 +34,5 @@ model = OPTModel(OPTConfig()) if checkpoint: model.gradient_checkpointing_enable() - value_head = nn.Linear(model.config.hidden_size, 1) + value_head = nn.Linear(model.config.word_embed_proj_dim, 1) super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py\n@@ -14,12 +14,16 @@\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n+ lora_rank (int): Rank of the LoRa layer.\n+ lora_train_bias (str): Bias training strategy for the LoRa layer.\n \"\"\"\n \n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n- checkpoint: bool = False) -> None:\n+ checkpoint: bool = False,\n+ lora_rank: int = 0,\n+ lora_train_bias: str = 'none') -> None:\n if pretrained is not None:\n model = GPT2LMHeadModel.from_pretrained(pretrained)\n elif config is not None:\n@@ -28,4 +32,4 @@\n model = GPT2LMHeadModel(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n- super().__init__(model)\n+ super().__init__(model, lora_rank, lora_train_bias)\ndiff --git a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py\n@@ -15,13 +15,16 @@\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n+ lora_rank (int): Rank of the LO-RA decomposition.\n+ lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n \n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False,\n- **kwargs) -> None:\n+ lora_rank: int = 0,\n+ lora_train_bias: str = 'none') -> None:\n if pretrained is not None:\n model = GPT2Model.from_pretrained(pretrained)\n elif config is not None:\n@@ -31,4 +34,4 @@\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.n_embd, 1)\n- super().__init__(model, value_head, **kwargs)\n+ super().__init__(model, value_head, lora_rank, lora_train_bias)\ndiff --git a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n--- a/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n+++ b/applications/ChatGPT/chatgpt/models/opt/opt_critic.py\n@@ -34,5 +34,5 @@\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n- value_head = nn.Linear(model.config.hidden_size, 1)\n+ value_head = nn.Linear(model.config.word_embed_proj_dim, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2Model\n\nfrom ..base import Critic\n\n\nclass GPTCritic(Critic):\n \"\"\"\n GPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False,\n **kwargs) -> None:\n if pretrained is not None:\n model = GPT2Model.from_pretrained(pretrained)\n elif config is not None:\n model = 
GPT2Model(config)\n else:\n model = GPT2Model(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.n_embd, 1)\n super().__init__(model, value_head, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/gpt/gpt_critic.py"}, {"content": "from typing import Optional\n\nimport torch.nn as nn\nfrom transformers.models.opt.configuration_opt import OPTConfig\nfrom transformers.models.opt.modeling_opt import OPTModel\n\nfrom ..base import Critic\n\n\nclass OPTCritic(Critic):\n \"\"\"\n OPT Critic model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (OPTConfig): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n lora_rank (int): Rank of the low-rank approximation.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[OPTConfig] = None,\n checkpoint: bool = False,\n lora_rank: int = 0,\n lora_train_bias: str = 'none',\n **kwargs) -> None:\n if pretrained is not None:\n model = OPTModel.from_pretrained(pretrained)\n elif config is not None:\n model = OPTModel(config)\n else:\n model = OPTModel(OPTConfig())\n if checkpoint:\n model.gradient_checkpointing_enable()\n value_head = nn.Linear(model.config.hidden_size, 1)\n super().__init__(model, value_head, lora_rank, lora_train_bias, **kwargs)\n", "path": "applications/ChatGPT/chatgpt/models/opt/opt_critic.py"}, {"content": "from typing import Optional\n\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel\n\nfrom ..base import Actor\n\n\nclass GPTActor(Actor):\n \"\"\"\n GPT Actor model.\n\n Args:\n pretrained (str): Pretrained model name or path.\n config (GPT2Config): Model config.\n checkpoint (bool): Enable gradient checkpointing.\n \"\"\"\n\n def __init__(self,\n pretrained: Optional[str] = None,\n config: Optional[GPT2Config] = None,\n checkpoint: bool = False) -> None:\n if pretrained is not None:\n model = GPT2LMHeadModel.from_pretrained(pretrained)\n elif config is not None:\n model = GPT2LMHeadModel(config)\n else:\n model = GPT2LMHeadModel(GPT2Config())\n if checkpoint:\n model.gradient_checkpointing_enable()\n super().__init__(model)\n", "path": "applications/ChatGPT/chatgpt/models/gpt/gpt_actor.py"}]}
1,579
811
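A minimal usage sketch of the constructors as they look after the golden diff; the flat import path, the availability of a local `"gpt2"` checkpoint, and `lora_rank=8` are all assumptions for illustration only:

```python
from chatgpt.models.gpt import GPTActor, GPTCritic  # assumed re-exports

# After the patch both classes accept the LoRA arguments and forward them
# to the Actor/Critic base classes instead of dropping them.
actor = GPTActor(pretrained="gpt2", lora_rank=8, lora_train_bias="none")
critic = GPTCritic(pretrained="gpt2", lora_rank=8, lora_train_bias="none")
```

The OPT change in the same diff is independent: the value head must be sized to `word_embed_proj_dim`, because OPT's projected embedding width is allowed to differ from `hidden_size`.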
gh_patches_debug_21007
rasdani/github-patches
git_diff
joke2k__faker-213
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `.prefix`/`.suffix` returns a tuple instead of a single value `.prefix` (and `.suffix`) can occasionally return a tuple of values instead of a single value when `prefixes_male` and `prefixes_female` (or `suffixes_*`) are present in the provider. [See here for the code responsible.](https://github.com/joke2k/faker/blob/2af330e09d84306d10921fed00ad2e5cc8e3d36f/faker/providers/person/__init__.py#L93-L94) I wasn't sure if this was intentional (it's documented to do so -- then again, the documentation is autogenerated, isn't it?), so I didn't make a PR yet, but it's certainly counterintuitive. </issue> <code> [start of faker/providers/person/__init__.py] 1 localized = True 2 3 from .. import BaseProvider 4 5 6 class Provider(BaseProvider): 7 formats = ['{{first_name}} {{last_name}}', ] 8 9 first_names = ['John', 'Jane'] 10 11 last_names = ['Doe', ] 12 13 def name(self): 14 """ 15 :example 'John Doe' 16 """ 17 pattern = self.random_element(self.formats) 18 return self.generator.parse(pattern) 19 20 @classmethod 21 def first_name(cls): 22 return cls.random_element(cls.first_names) 23 24 @classmethod 25 def last_name(cls): 26 return cls.random_element(cls.last_names) 27 28 def name_male(self): 29 if hasattr(self, 'formats_male'): 30 formats = self.formats_male 31 else: 32 formats = self.formats 33 pattern = self.random_element(formats) 34 return self.generator.parse(pattern) 35 36 def name_female(self): 37 if hasattr(self, 'formats_female'): 38 formats = self.formats_female 39 else: 40 formats = self.formats 41 pattern = self.random_element(formats) 42 return self.generator.parse(pattern) 43 44 @classmethod 45 def first_name_male(cls): 46 if hasattr(cls, 'first_names_male'): 47 return cls.random_element(cls.first_names_male) 48 return cls.first_name() 49 50 @classmethod 51 def first_name_female(cls): 52 if hasattr(cls, 'first_names_female'): 53 return cls.random_element(cls.first_names_female) 54 return cls.first_name() 55 56 @classmethod 57 def last_name_male(cls): 58 if hasattr(cls, 'last_names_male'): 59 return cls.random_element(cls.last_names_male) 60 return cls.last_name() 61 62 @classmethod 63 def last_name_female(cls): 64 if hasattr(cls, 'last_names_female'): 65 return cls.random_element(cls.last_names_female) 66 return cls.last_name() 67 68 69 @classmethod 70 def prefix(cls): 71 if hasattr(cls, 'prefixes'): 72 return cls.random_element(cls.prefixes) 73 if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'): 74 return cls.random_element((cls.prefixes_male, cls.prefixes_female)) 75 return '' 76 77 @classmethod 78 def prefix_male(cls): 79 if hasattr(cls, 'prefixes_male'): 80 return cls.random_element(cls.prefixes_male) 81 return cls.prefix() 82 83 @classmethod 84 def prefix_female(cls): 85 if hasattr(cls, 'prefixes_female'): 86 return cls.random_element(cls.prefixes_female) 87 return cls.prefix() 88 89 @classmethod 90 def suffix(cls): 91 if hasattr(cls, 'suffixes'): 92 return cls.random_element(cls.suffixes) 93 if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'): 94 return cls.random_element((cls.suffixes_male, cls.suffixes_female)) 95 return '' 96 97 @classmethod 98 def suffix_male(cls): 99 if hasattr(cls, 'suffixes_male'): 100 return cls.random_element(cls.suffixes_male) 101 return cls.suffix() 102 103 @classmethod 104 def suffix_female(cls): 105 if hasattr(cls, 'suffixes_female'): 106 return cls.random_element(cls.suffixes_female) 107 return 
cls.suffix() 108 [end of faker/providers/person/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/person/__init__.py b/faker/providers/person/__init__.py --- a/faker/providers/person/__init__.py +++ b/faker/providers/person/__init__.py @@ -71,7 +71,8 @@ if hasattr(cls, 'prefixes'): return cls.random_element(cls.prefixes) if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'): - return cls.random_element((cls.prefixes_male, cls.prefixes_female)) + prefixes = cls.random_element((cls.prefixes_male, cls.prefixes_female)) + return cls.random_element(prefixes) return '' @classmethod @@ -91,7 +92,8 @@ if hasattr(cls, 'suffixes'): return cls.random_element(cls.suffixes) if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'): - return cls.random_element((cls.suffixes_male, cls.suffixes_female)) + suffixes = cls.random_element((cls.suffixes_male, cls.suffixes_female)) + return cls.random_element(suffixes) return '' @classmethod
{"golden_diff": "diff --git a/faker/providers/person/__init__.py b/faker/providers/person/__init__.py\n--- a/faker/providers/person/__init__.py\n+++ b/faker/providers/person/__init__.py\n@@ -71,7 +71,8 @@\n if hasattr(cls, 'prefixes'):\n return cls.random_element(cls.prefixes)\n if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):\n- return cls.random_element((cls.prefixes_male, cls.prefixes_female))\n+ prefixes = cls.random_element((cls.prefixes_male, cls.prefixes_female))\n+ return cls.random_element(prefixes)\n return ''\n \n @classmethod\n@@ -91,7 +92,8 @@\n if hasattr(cls, 'suffixes'):\n return cls.random_element(cls.suffixes)\n if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):\n- return cls.random_element((cls.suffixes_male, cls.suffixes_female))\n+ suffixes = cls.random_element((cls.suffixes_male, cls.suffixes_female))\n+ return cls.random_element(suffixes)\n return ''\n \n @classmethod\n", "issue": "`.prefix`/`.suffix` returns a tuple instead of a single value\n`.prefix` (and `.suffix`) can occasionally return a tuple of values instead of a single value when `prefixes_male` and `prefixes_female` (or `suffixes_*`) are present in the provider.\n\n[See here for the code responsible.](https://github.com/joke2k/faker/blob/2af330e09d84306d10921fed00ad2e5cc8e3d36f/faker/providers/person/__init__.py#L93-L94)\n\nI wasn't sure if this was intentional (it's documented to do so -- then again, the documentation is autogenerated, isn't it?), so I didn't make a PR yet, but it's certainly counterintuitive.\n\n", "before_files": [{"content": "localized = True\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n formats = ['{{first_name}} {{last_name}}', ]\n\n first_names = ['John', 'Jane']\n\n last_names = ['Doe', ]\n\n def name(self):\n \"\"\"\n :example 'John Doe'\n \"\"\"\n pattern = self.random_element(self.formats)\n return self.generator.parse(pattern)\n\n @classmethod\n def first_name(cls):\n return cls.random_element(cls.first_names)\n\n @classmethod\n def last_name(cls):\n return cls.random_element(cls.last_names)\n\n def name_male(self):\n if hasattr(self, 'formats_male'):\n formats = self.formats_male\n else:\n formats = self.formats\n pattern = self.random_element(formats)\n return self.generator.parse(pattern)\n\n def name_female(self):\n if hasattr(self, 'formats_female'):\n formats = self.formats_female\n else:\n formats = self.formats\n pattern = self.random_element(formats)\n return self.generator.parse(pattern)\n\n @classmethod\n def first_name_male(cls):\n if hasattr(cls, 'first_names_male'):\n return cls.random_element(cls.first_names_male)\n return cls.first_name()\n\n @classmethod\n def first_name_female(cls):\n if hasattr(cls, 'first_names_female'):\n return cls.random_element(cls.first_names_female)\n return cls.first_name()\n\n @classmethod\n def last_name_male(cls):\n if hasattr(cls, 'last_names_male'):\n return cls.random_element(cls.last_names_male)\n return cls.last_name()\n\n @classmethod\n def last_name_female(cls):\n if hasattr(cls, 'last_names_female'):\n return cls.random_element(cls.last_names_female)\n return cls.last_name()\n\n\n @classmethod\n def prefix(cls):\n if hasattr(cls, 'prefixes'):\n return cls.random_element(cls.prefixes)\n if hasattr(cls, 'prefixes_male') and hasattr(cls, 'prefixes_female'):\n return cls.random_element((cls.prefixes_male, cls.prefixes_female))\n return ''\n\n @classmethod\n def prefix_male(cls):\n if hasattr(cls, 'prefixes_male'):\n return cls.random_element(cls.prefixes_male)\n 
return cls.prefix()\n\n @classmethod\n def prefix_female(cls):\n if hasattr(cls, 'prefixes_female'):\n return cls.random_element(cls.prefixes_female)\n return cls.prefix()\n\n @classmethod\n def suffix(cls):\n if hasattr(cls, 'suffixes'):\n return cls.random_element(cls.suffixes)\n if hasattr(cls, 'suffixes_male') and hasattr(cls, 'suffixes_female'):\n return cls.random_element((cls.suffixes_male, cls.suffixes_female))\n return ''\n\n @classmethod\n def suffix_male(cls):\n if hasattr(cls, 'suffixes_male'):\n return cls.random_element(cls.suffixes_male)\n return cls.suffix()\n\n @classmethod\n def suffix_female(cls):\n if hasattr(cls, 'suffixes_female'):\n return cls.random_element(cls.suffixes_female)\n return cls.suffix()\n", "path": "faker/providers/person/__init__.py"}]}
1,597
253
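The bug in this record is easiest to see with plain `random.choice`, which is what Faker's `random_element` amounts to here; the sample prefix lists are made-up stand-ins:

```python
import random

prefixes_male = ["Mr."]
prefixes_female = ["Mrs.", "Ms.", "Dr."]

# Old behaviour: choose between the two *lists*, so the caller could get
# an entire list back instead of a single prefix string.
broken = random.choice((prefixes_male, prefixes_female))  # e.g. ["Mrs.", "Ms.", "Dr."]

# Patched behaviour: pick a gendered list first, then one element from it.
prefixes = random.choice((prefixes_male, prefixes_female))
fixed = random.choice(prefixes)                           # e.g. "Ms."
```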
gh_patches_debug_24821
rasdani/github-patches
git_diff
nilearn__nilearn-3077
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nilearn.plotting overrides the matplotlib backend, causing troubles on remote SSH development I am working on a remote settings, accessing a machine (drago/margaret for INRIA context) through a PyCharm Remote Interpreter. I have been struggling to display nilearn figures in the scientific view of PyCharm, leading to a painful process of saving the image => rsync figures dir => visualize figures once there are on personal laptop. I did a simple experiment drawing first a simple `plot(list(range(10)))` which is displayed and then a nilearn figure which does not show up (script at the end of the issue). I found out the reason for this, which is that in `nilearn.plotting.__init__`, the matplotlib backend is set to `agg` if not in `[inline, agg]`, but the backend used initially (which is working for me) is `module://backend_interagg`. Setting the backend to the initial value after importing `nilearn.datasets` fixes the problem. I don't know exactly how those matplotlib backends work, if the list of accepted backends in `nilearn.datasets.__init__` could be extended or if we could have a more robust list of valid matplotlib backend. Also, I feel that nilearn shouldn't override matplotlib backend silently? <!--Please fill in the following information, to the best of your ability.--> Nilearn version: 0.7.1 ### Expected behavior Nilearn does not override matplotlib backend silently. ### Actual behavior Nilearn sets backend to "agg" if not in ["inline", "agg"], causing trouble with remote SSH development. ### Steps and code to reproduce bug Run the following script through SSH remote interpreter ```python import matplotlib initial_backend = matplotlib.get_backend().lower() print(initial_backend) import matplotlib.pyplot as plt import numpy as np arr = np.zeros((100, 100)) plt.figure() plt.plot(list(range(10))) plt.show() # >> The show is displayed in PyCharm from nilearn import datasets, plotting print(matplotlib.get_backend().lower()) # >> Backend has switched to "agg" sample_brain_map = datasets.fetch_neurovault_motor_task().images[0] plotting.plot_stat_map( sample_brain_map, threshold=3, title="Before setting back the backend", ) plt.show() # >> Does not show up matplotlib.use(initial_backend) plotting.plot_stat_map( sample_brain_map, threshold=3, title="After setting back the backend", ) plt.show() # >> Shows up ``` </issue> <code> [start of nilearn/plotting/__init__.py] 1 """ 2 Plotting code for nilearn 3 """ 4 # Original Authors: Chris Filo Gorgolewski, Gael Varoquaux 5 import os 6 import sys 7 import importlib 8 9 10 ############################################################################### 11 # Make sure that we don't get DISPLAY problems when running without X on 12 # unices 13 def _set_mpl_backend(): 14 # We are doing local imports here to avoid polluting our namespace 15 try: 16 import matplotlib 17 except ImportError: 18 if importlib.util.find_spec("pytest") is not None: 19 from .._utils.testing import skip_if_running_tests 20 # No need to fail when running tests 21 skip_if_running_tests('matplotlib not installed') 22 raise 23 else: 24 from ..version import (_import_module_with_version_check, 25 OPTIONAL_MATPLOTLIB_MIN_VERSION) 26 # When matplotlib was successfully imported we need to check 27 # that the version is greater that the minimum required one 28 _import_module_with_version_check('matplotlib', 29 OPTIONAL_MATPLOTLIB_MIN_VERSION) 30 current_backend = 
matplotlib.get_backend().lower() 31 32 if 'inline' in current_backend or 'nbagg' in current_backend: 33 return 34 # Set the backend to a non-interactive one for unices without X 35 # (see gh-2560) 36 if (sys.platform not in ('darwin', 'win32') and 37 'DISPLAY' not in os.environ): 38 matplotlib.use('Agg') 39 40 41 _set_mpl_backend() 42 43 ############################################################################### 44 from . import cm 45 from .img_plotting import ( 46 plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map, 47 plot_glass_brain, plot_connectome, plot_connectome_strength, 48 plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show) 49 from .find_cuts import find_xyz_cut_coords, find_cut_slices, \ 50 find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords 51 from .matrix_plotting import (plot_matrix, plot_contrast_matrix, 52 plot_design_matrix, plot_event) 53 from .html_surface import view_surf, view_img_on_surf 54 from .html_stat_map import view_img 55 from .html_connectome import view_connectome, view_markers 56 from .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi, 57 plot_img_on_surf, plot_surf_contours) 58 59 __all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi', 60 'plot_roi', 'plot_stat_map', 'plot_glass_brain', 61 'plot_markers', 'plot_connectome', 'plot_prob_atlas', 62 'find_xyz_cut_coords', 'find_cut_slices', 63 'plot_img_comparison', 64 'show', 'plot_matrix', 65 'plot_design_matrix', 'plot_contrast_matrix', 'plot_event', 66 'view_surf', 'view_img_on_surf', 67 'view_img', 'view_connectome', 'view_markers', 68 'find_parcellation_cut_coords', 69 'find_probabilistic_atlas_cut_coords', 70 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi', 71 'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet', 72 'plot_surf_contours'] 73 [end of nilearn/plotting/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py --- a/nilearn/plotting/__init__.py +++ b/nilearn/plotting/__init__.py @@ -2,9 +2,8 @@ Plotting code for nilearn """ # Original Authors: Chris Filo Gorgolewski, Gael Varoquaux -import os -import sys import importlib +import warnings ############################################################################### @@ -29,13 +28,17 @@ OPTIONAL_MATPLOTLIB_MIN_VERSION) current_backend = matplotlib.get_backend().lower() - if 'inline' in current_backend or 'nbagg' in current_backend: - return - # Set the backend to a non-interactive one for unices without X - # (see gh-2560) - if (sys.platform not in ('darwin', 'win32') and - 'DISPLAY' not in os.environ): - matplotlib.use('Agg') + try: + # Making sure the current backend is usable by matplotlib + matplotlib.use(current_backend) + except Exception: + # If not, switching to default agg backend + matplotlib.use("Agg") + new_backend = matplotlib.get_backend().lower() + + if new_backend != current_backend: + # Matplotlib backend has been changed, let's warn the user + warnings.warn(f"Backend changed to {new_backend}...") _set_mpl_backend()
{"golden_diff": "diff --git a/nilearn/plotting/__init__.py b/nilearn/plotting/__init__.py\n--- a/nilearn/plotting/__init__.py\n+++ b/nilearn/plotting/__init__.py\n@@ -2,9 +2,8 @@\n Plotting code for nilearn\n \"\"\"\n # Original Authors: Chris Filo Gorgolewski, Gael Varoquaux\n-import os\n-import sys\n import importlib\n+import warnings\n \n \n ###############################################################################\n@@ -29,13 +28,17 @@\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n current_backend = matplotlib.get_backend().lower()\n \n- if 'inline' in current_backend or 'nbagg' in current_backend:\n- return\n- # Set the backend to a non-interactive one for unices without X\n- # (see gh-2560)\n- if (sys.platform not in ('darwin', 'win32') and\n- 'DISPLAY' not in os.environ):\n- matplotlib.use('Agg')\n+ try:\n+ # Making sure the current backend is usable by matplotlib\n+ matplotlib.use(current_backend)\n+ except Exception:\n+ # If not, switching to default agg backend\n+ matplotlib.use(\"Agg\")\n+ new_backend = matplotlib.get_backend().lower()\n+\n+ if new_backend != current_backend:\n+ # Matplotlib backend has been changed, let's warn the user\n+ warnings.warn(f\"Backend changed to {new_backend}...\")\n \n \n _set_mpl_backend()\n", "issue": "nilearn.plotting overrides the matplotlib backend, causing troubles on remote SSH development\nI am working on a remote settings, accessing a machine (drago/margaret for INRIA context) through a PyCharm Remote Interpreter.\r\nI have been struggling to display nilearn figures in the scientific view of PyCharm, leading to a painful process of saving the image => rsync figures dir => visualize figures once there are on personal laptop.\r\n\r\nI did a simple experiment drawing first a simple `plot(list(range(10)))` which is displayed and then a nilearn figure which does not show up (script at the end of the issue).\r\n\r\nI found out the reason for this, which is that in `nilearn.plotting.__init__`, the matplotlib backend is set to `agg` if not in `[inline, agg]`, but the backend used initially (which is working for me) is `module://backend_interagg`.\r\n\r\nSetting the backend to the initial value after importing `nilearn.datasets` fixes the problem.\r\n\r\nI don't know exactly how those matplotlib backends work, if the list of accepted backends in `nilearn.datasets.__init__` could be extended or if we could have a more robust list of valid matplotlib backend. Also, I feel that nilearn shouldn't override matplotlib backend silently? 
\r\n\r\n<!--Please fill in the following information, to the best of your ability.-->\r\nNilearn version: 0.7.1\r\n\r\n### Expected behavior\r\n\r\nNilearn does not override matplotlib backend silently.\r\n\r\n### Actual behavior\r\n\r\nNilearn sets backend to \"agg\" if not in [\"inline\", \"agg\"], causing trouble with remote SSH development.\r\n\r\n### Steps and code to reproduce bug\r\n\r\nRun the following script through SSH remote interpreter\r\n\r\n```python\r\nimport matplotlib\r\ninitial_backend = matplotlib.get_backend().lower()\r\nprint(initial_backend)\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\narr = np.zeros((100, 100))\r\nplt.figure()\r\nplt.plot(list(range(10)))\r\nplt.show()\r\n# >> The show is displayed in PyCharm\r\n\r\nfrom nilearn import datasets, plotting\r\nprint(matplotlib.get_backend().lower())\r\n# >> Backend has switched to \"agg\"\r\n\r\nsample_brain_map = datasets.fetch_neurovault_motor_task().images[0]\r\nplotting.plot_stat_map(\r\n sample_brain_map,\r\n threshold=3,\r\n title=\"Before setting back the backend\",\r\n)\r\n\r\nplt.show()\r\n# >> Does not show up\r\n\r\nmatplotlib.use(initial_backend)\r\nplotting.plot_stat_map(\r\n sample_brain_map,\r\n threshold=3,\r\n title=\"After setting back the backend\",\r\n)\r\n\r\nplt.show()\r\n# >> Shows up\r\n\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nPlotting code for nilearn\n\"\"\"\n# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux\nimport os\nimport sys\nimport importlib\n\n\n###############################################################################\n# Make sure that we don't get DISPLAY problems when running without X on\n# unices\ndef _set_mpl_backend():\n # We are doing local imports here to avoid polluting our namespace\n try:\n import matplotlib\n except ImportError:\n if importlib.util.find_spec(\"pytest\") is not None:\n from .._utils.testing import skip_if_running_tests\n # No need to fail when running tests\n skip_if_running_tests('matplotlib not installed')\n raise\n else:\n from ..version import (_import_module_with_version_check,\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n # When matplotlib was successfully imported we need to check\n # that the version is greater that the minimum required one\n _import_module_with_version_check('matplotlib',\n OPTIONAL_MATPLOTLIB_MIN_VERSION)\n current_backend = matplotlib.get_backend().lower()\n\n if 'inline' in current_backend or 'nbagg' in current_backend:\n return\n # Set the backend to a non-interactive one for unices without X\n # (see gh-2560)\n if (sys.platform not in ('darwin', 'win32') and\n 'DISPLAY' not in os.environ):\n matplotlib.use('Agg')\n\n\n_set_mpl_backend()\n\n###############################################################################\nfrom . 
import cm\nfrom .img_plotting import (\n plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,\n plot_glass_brain, plot_connectome, plot_connectome_strength,\n plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)\nfrom .find_cuts import find_xyz_cut_coords, find_cut_slices, \\\n find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords\nfrom .matrix_plotting import (plot_matrix, plot_contrast_matrix,\n plot_design_matrix, plot_event)\nfrom .html_surface import view_surf, view_img_on_surf\nfrom .html_stat_map import view_img\nfrom .html_connectome import view_connectome, view_markers\nfrom .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,\n plot_img_on_surf, plot_surf_contours)\n\n__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',\n 'plot_roi', 'plot_stat_map', 'plot_glass_brain',\n 'plot_markers', 'plot_connectome', 'plot_prob_atlas',\n 'find_xyz_cut_coords', 'find_cut_slices',\n 'plot_img_comparison',\n 'show', 'plot_matrix',\n 'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',\n 'view_surf', 'view_img_on_surf',\n 'view_img', 'view_connectome', 'view_markers',\n 'find_parcellation_cut_coords',\n 'find_probabilistic_atlas_cut_coords',\n 'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',\n 'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',\n 'plot_surf_contours']\n", "path": "nilearn/plotting/__init__.py"}]}
1,924
338
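The patched `_set_mpl_backend` reduces to a small probe-and-fallback pattern that can be run on its own; only the warning wording is copied from the diff, the rest is a standalone restatement:

```python
import warnings

import matplotlib

current_backend = matplotlib.get_backend().lower()
try:
    # Re-selecting the current backend is a cheap way to let matplotlib
    # itself decide whether it is usable (headless SSH, missing GUI, ...).
    matplotlib.use(current_backend)
except Exception:
    matplotlib.use("Agg")  # last-resort non-interactive backend

new_backend = matplotlib.get_backend().lower()
if new_backend != current_backend:
    # The switch is no longer silent, which was the complaint in the issue.
    warnings.warn(f"Backend changed to {new_backend}...")
```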
gh_patches_debug_130
rasdani/github-patches
git_diff
svthalia__concrexit-1750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Event registration member serializer should be read only ### Describe the bug https://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/events/api/v2/serializers/event_registration.py#L34 This serializer should be read-only ### How to reproduce https://staging.thalia.nu/api/v2/events/150/registrations/ shows that you can POST to update the member profile, that should not be the case ### Expected behaviour Be read only </issue> <code> [start of website/events/api/v2/serializers/event_registration.py] 1 from rest_framework import serializers 2 3 from events.models import EventRegistration 4 from members.api.v2.serializers.member import MemberSerializer 5 6 7 class EventRegistrationSerializer(serializers.ModelSerializer): 8 """Serializer for event registrations.""" 9 10 def __init__(self, *args, **kwargs): 11 # Don't pass the 'fields' arg up to the superclass 12 fields = kwargs.pop("fields", {"pk", "member", "name"}) 13 14 # Instantiate the superclass normally 15 super().__init__(*args, **kwargs) 16 17 allowed = set(fields) 18 existing = set(self.fields.keys()) 19 for field_name in existing - allowed: 20 self.fields.pop(field_name) 21 22 class Meta: 23 model = EventRegistration 24 fields = ( 25 "pk", 26 "present", 27 "queue_position", 28 "date", 29 "payment", 30 "member", 31 "name", 32 ) 33 34 member = MemberSerializer(detailed=False) 35 [end of website/events/api/v2/serializers/event_registration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py --- a/website/events/api/v2/serializers/event_registration.py +++ b/website/events/api/v2/serializers/event_registration.py @@ -31,4 +31,4 @@ "name", ) - member = MemberSerializer(detailed=False) + member = MemberSerializer(detailed=False, read_only=True)
{"golden_diff": "diff --git a/website/events/api/v2/serializers/event_registration.py b/website/events/api/v2/serializers/event_registration.py\n--- a/website/events/api/v2/serializers/event_registration.py\n+++ b/website/events/api/v2/serializers/event_registration.py\n@@ -31,4 +31,4 @@\n \"name\",\n )\n \n- member = MemberSerializer(detailed=False)\n+ member = MemberSerializer(detailed=False, read_only=True)\n", "issue": "Event registration member serializer should be read only\n### Describe the bug\r\nhttps://github.com/svthalia/concrexit/blob/4ab37961f50e398cc52422cdc1df66f6ab8ff2ee/website/events/api/v2/serializers/event_registration.py#L34 This serializer should be read-only\r\n\r\n### How to reproduce\r\nhttps://staging.thalia.nu/api/v2/events/150/registrations/ shows that you can POST to update the member profile, that should not be the case\r\n\r\n### Expected behaviour\r\nBe read only\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom events.models import EventRegistration\nfrom members.api.v2.serializers.member import MemberSerializer\n\n\nclass EventRegistrationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for event registrations.\"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'fields' arg up to the superclass\n fields = kwargs.pop(\"fields\", {\"pk\", \"member\", \"name\"})\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n class Meta:\n model = EventRegistration\n fields = (\n \"pk\",\n \"present\",\n \"queue_position\",\n \"date\",\n \"payment\",\n \"member\",\n \"name\",\n )\n\n member = MemberSerializer(detailed=False)\n", "path": "website/events/api/v2/serializers/event_registration.py"}]}
936
104
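The one-line fix follows the standard DRF rule that nested serializers used purely for representation should be flagged `read_only`; the sketch below reuses the record's imports and trims `Meta.fields` down for brevity:

```python
from rest_framework import serializers

from events.models import EventRegistration
from members.api.v2.serializers.member import MemberSerializer


class EventRegistrationSerializer(serializers.ModelSerializer):
    class Meta:
        model = EventRegistration
        fields = ("pk", "member", "name")

    # read_only=True: the nested member is rendered on GET but ignored on
    # POST/PUT, so clients can no longer write through to the profile.
    member = MemberSerializer(detailed=False, read_only=True)
```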
gh_patches_debug_30897
rasdani/github-patches
git_diff
encode__starlette-186
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Session middleware is highly insecure The session middleware does not have any timestamp component to it, which means that if a session happens to be somehow leaked, it can be reused any time in the future. Can we have a timestamp component added to the session, so that the session middleware can reject such sessions after a reasonable period of time (e.g. 24 hours)? </issue> <code> [start of starlette/middleware/sessions.py] 1 import functools 2 import json 3 from base64 import b64decode, b64encode 4 5 import itsdangerous 6 7 from starlette.datastructures import MutableHeaders 8 from starlette.requests import Request 9 from starlette.types import ASGIApp, ASGIInstance, Message, Receive, Scope, Send 10 11 12 class SessionMiddleware: 13 def __init__( 14 self, app: ASGIApp, secret_key: str, session_cookie: str = "session" 15 ) -> None: 16 self.app = app 17 self.signer = itsdangerous.Signer(secret_key) 18 self.session_cookie = session_cookie 19 20 def __call__(self, scope: Scope) -> ASGIInstance: 21 if scope["type"] in ("http", "websocket"): 22 request = Request(scope) 23 if self.session_cookie in request.cookies: 24 data = request.cookies[self.session_cookie].encode("utf-8") 25 data = self.signer.unsign(data) 26 scope["session"] = json.loads(b64decode(data)) 27 else: 28 scope["session"] = {} 29 return functools.partial(self.asgi, scope=scope) 30 return self.app(scope) # pragma: no cover 31 32 async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None: 33 was_empty_session = not scope["session"] 34 inner = self.app(scope) 35 36 async def sender(message: Message) -> None: 37 if message["type"] == "http.response.start": 38 if scope["session"]: 39 # We have session data to persist. 40 data = b64encode(json.dumps(scope["session"]).encode("utf-8")) 41 data = self.signer.sign(data) 42 headers = MutableHeaders(scope=message) 43 header_value = "%s=%s" % (self.session_cookie, data.decode("utf-8")) 44 headers.append("Set-Cookie", header_value) 45 elif not was_empty_session: 46 # The session has been cleared. 47 headers = MutableHeaders(scope=message) 48 header_value = "%s=%s" % ( 49 self.session_cookie, 50 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT", 51 ) 52 headers.append("Set-Cookie", header_value) 53 await send(message) 54 55 await inner(receive, sender) 56 [end of starlette/middleware/sessions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py --- a/starlette/middleware/sessions.py +++ b/starlette/middleware/sessions.py @@ -3,6 +3,7 @@ from base64 import b64decode, b64encode import itsdangerous +from itsdangerous.exc import BadTimeSignature, SignatureExpired from starlette.datastructures import MutableHeaders from starlette.requests import Request @@ -11,19 +12,27 @@ class SessionMiddleware: def __init__( - self, app: ASGIApp, secret_key: str, session_cookie: str = "session" + self, + app: ASGIApp, + secret_key: str, + session_cookie: str = "session", + max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds ) -> None: self.app = app - self.signer = itsdangerous.Signer(secret_key) + self.signer = itsdangerous.TimestampSigner(secret_key) self.session_cookie = session_cookie + self.max_age = max_age def __call__(self, scope: Scope) -> ASGIInstance: if scope["type"] in ("http", "websocket"): request = Request(scope) if self.session_cookie in request.cookies: data = request.cookies[self.session_cookie].encode("utf-8") - data = self.signer.unsign(data) - scope["session"] = json.loads(b64decode(data)) + try: + data = self.signer.unsign(data, max_age=self.max_age) + scope["session"] = json.loads(b64decode(data)) + except (BadTimeSignature, SignatureExpired): + scope["session"] = {} else: scope["session"] = {} return functools.partial(self.asgi, scope=scope)
{"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -3,6 +3,7 @@\n from base64 import b64decode, b64encode\n \n import itsdangerous\n+from itsdangerous.exc import BadTimeSignature, SignatureExpired\n \n from starlette.datastructures import MutableHeaders\n from starlette.requests import Request\n@@ -11,19 +12,27 @@\n \n class SessionMiddleware:\n def __init__(\n- self, app: ASGIApp, secret_key: str, session_cookie: str = \"session\"\n+ self,\n+ app: ASGIApp,\n+ secret_key: str,\n+ session_cookie: str = \"session\",\n+ max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n ) -> None:\n self.app = app\n- self.signer = itsdangerous.Signer(secret_key)\n+ self.signer = itsdangerous.TimestampSigner(secret_key)\n self.session_cookie = session_cookie\n+ self.max_age = max_age\n \n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n request = Request(scope)\n if self.session_cookie in request.cookies:\n data = request.cookies[self.session_cookie].encode(\"utf-8\")\n- data = self.signer.unsign(data)\n- scope[\"session\"] = json.loads(b64decode(data))\n+ try:\n+ data = self.signer.unsign(data, max_age=self.max_age)\n+ scope[\"session\"] = json.loads(b64decode(data))\n+ except (BadTimeSignature, SignatureExpired):\n+ scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n return functools.partial(self.asgi, scope=scope)\n", "issue": "Session middleware is highly insecure\nThe session middleware does not have any timestamp component to it, which means that if a session happens to be somehow leaked, it can be reused any time in the future.\r\n\r\nCan we have a timestamp component added to the session, so that the session middleware can reject such sessions after a reasonable period of time (e.g. 
24 hours)?\n", "before_files": [{"content": "import functools\nimport json\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\n\nfrom starlette.datastructures import MutableHeaders\nfrom starlette.requests import Request\nfrom starlette.types import ASGIApp, ASGIInstance, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self, app: ASGIApp, secret_key: str, session_cookie: str = \"session\"\n ) -> None:\n self.app = app\n self.signer = itsdangerous.Signer(secret_key)\n self.session_cookie = session_cookie\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\"):\n request = Request(scope)\n if self.session_cookie in request.cookies:\n data = request.cookies[self.session_cookie].encode(\"utf-8\")\n data = self.signer.unsign(data)\n scope[\"session\"] = json.loads(b64decode(data))\n else:\n scope[\"session\"] = {}\n return functools.partial(self.asgi, scope=scope)\n return self.app(scope) # pragma: no cover\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n was_empty_session = not scope[\"session\"]\n inner = self.app(scope)\n\n async def sender(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s\" % (self.session_cookie, data.decode(\"utf-8\"))\n headers.append(\"Set-Cookie\", header_value)\n elif not was_empty_session:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s\" % (\n self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT\",\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await inner(receive, sender)\n", "path": "starlette/middleware/sessions.py"}]}
1,212
429
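The security property added by the diff comes entirely from `itsdangerous.TimestampSigner`, which can be exercised directly; the fourteen-day `max_age` matches the new default in the patch and `"secret-key"` is a placeholder:

```python
import itsdangerous
from itsdangerous.exc import BadTimeSignature, SignatureExpired

signer = itsdangerous.TimestampSigner("secret-key")
cookie = signer.sign(b"session-payload")

try:
    # A leaked cookie is only honoured while younger than max_age;
    # afterwards unsign() raises and the middleware falls back to {}.
    data = signer.unsign(cookie, max_age=14 * 24 * 60 * 60)
except (BadTimeSignature, SignatureExpired):
    data = b""
```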
gh_patches_debug_13477
rasdani/github-patches
git_diff
dj-stripe__dj-stripe-547
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error: Cannot resolve keyword 'customer' into field. All, Cannot get past the step `python manage.py djstripe_init_customers` in the installation. Running Python 3.6.0, Django 1.11, and the latest version of dj-stripe (1.0.0). What combination of Django version and dj-stripe version are folks successfully using at the moment? Thanks! Here is the traceback: ``` Traceback (most recent call last): File "manage.py", line 22, in <module> execute_from_command_line(sys.argv) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 363, in execute_from_command_line utility.execute() File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py", line 355, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv self.execute(*args, **cmd_options) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py", line 330, in execute output = self.handle(*args, **options) File "/Users/jdln/temp/dj-stripe/djstripe/management/commands/djstripe_init_customers.py", line 25, in handle for subscriber in get_subscriber_model().objects.filter(customer__isnull=True): File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/manager.py", line 85, in manager_method return getattr(self.get_queryset(), name)(*args, **kwargs) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 781, in filter return self._filter_or_exclude(False, *args, **kwargs) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py", line 799, in _filter_or_exclude clone.query.add_q(Q(*args, **kwargs)) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1260, in add_q clause, _ = self._add_q(q_object, self.used_aliases) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1286, in _add_q allow_joins=allow_joins, split_subq=split_subq, File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1164, in build_filter lookups, parts, reffed_expression = self.solve_lookup_type(arg) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1044, in solve_lookup_type _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File "/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1362, in names_to_path "Choices are: %s" % (name, ", ".join(available))) django.core.exceptions.FieldError: Cannot resolve keyword 'customer' into field. Choices are: date_joined, djstripe_customers, email, first_name, groups, id, is_active, is_staff, is_superuser, last_login, last_name, logentry, password, user_permissions, username ``` </issue> <code> [start of djstripe/management/commands/djstripe_init_customers.py] 1 # -*- coding: utf-8 -*- 2 """ 3 .. module:: djstripe.management.commands.djstripe_init_customers. 4 5 :synopsis: dj-stripe - init_customers command. 6 7 .. 
moduleauthor:: @kavdev, @pydanny 8 9 """ 10 from __future__ import unicode_literals 11 12 from django.core.management.base import BaseCommand 13 14 from ...models import Customer 15 from ...settings import get_subscriber_model 16 17 18 class Command(BaseCommand): 19 """Create customer objects for existing subscribers that don't have one.""" 20 21 help = "Create customer objects for existing subscribers that don't have one" 22 23 def handle(self, *args, **options): 24 """Create Customer objects for Subscribers without Customer objects associated.""" 25 for subscriber in get_subscriber_model().objects.filter(customer__isnull=True): 26 # use get_or_create in case of race conditions on large subscriber bases 27 Customer.get_or_create(subscriber=subscriber) 28 print("Created subscriber for {0}".format(subscriber.email)) 29 [end of djstripe/management/commands/djstripe_init_customers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/djstripe/management/commands/djstripe_init_customers.py b/djstripe/management/commands/djstripe_init_customers.py --- a/djstripe/management/commands/djstripe_init_customers.py +++ b/djstripe/management/commands/djstripe_init_customers.py @@ -22,7 +22,7 @@ def handle(self, *args, **options): """Create Customer objects for Subscribers without Customer objects associated.""" - for subscriber in get_subscriber_model().objects.filter(customer__isnull=True): + for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None): # use get_or_create in case of race conditions on large subscriber bases Customer.get_or_create(subscriber=subscriber) print("Created subscriber for {0}".format(subscriber.email))
{"golden_diff": "diff --git a/djstripe/management/commands/djstripe_init_customers.py b/djstripe/management/commands/djstripe_init_customers.py\n--- a/djstripe/management/commands/djstripe_init_customers.py\n+++ b/djstripe/management/commands/djstripe_init_customers.py\n@@ -22,7 +22,7 @@\n \n def handle(self, *args, **options):\n \"\"\"Create Customer objects for Subscribers without Customer objects associated.\"\"\"\n- for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\n+ for subscriber in get_subscriber_model().objects.filter(djstripe_customers=None):\n # use get_or_create in case of race conditions on large subscriber bases\n Customer.get_or_create(subscriber=subscriber)\n print(\"Created subscriber for {0}\".format(subscriber.email))\n", "issue": "Error: Cannot resolve keyword 'customer' into field. \nAll,\r\n\r\nCannot get past the step\r\n\r\n`python manage.py djstripe_init_customers`\r\n\r\nin the installation. \r\n\r\nRunning Python 3.6.0, Django 1.11, and the latest version of dj-stripe (1.0.0).\r\n\r\nWhat combination of Django version and dj-stripe version are folks successfully using at the moment? Thanks! \r\n\r\nHere is the traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"manage.py\", line 22, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py\", line 363, in execute_from_command_line\r\n utility.execute()\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/__init__.py\", line 355, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/Users/jdln/temp/dj-stripe/djstripe/management/commands/djstripe_init_customers.py\", line 25, in handle\r\n for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/manager.py\", line 85, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py\", line 781, in filter\r\n return self._filter_or_exclude(False, *args, **kwargs)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/query.py\", line 799, in _filter_or_exclude\r\n clone.query.add_q(Q(*args, **kwargs))\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1260, in add_q\r\n clause, _ = self._add_q(q_object, self.used_aliases)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1286, in _add_q\r\n allow_joins=allow_joins, split_subq=split_subq,\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1164, in build_filter\r\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1044, in solve_lookup_type\r\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, 
self.get_meta())\r\n File \"/Users/jdln/.virtualenvs/djstripe/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1362, in names_to_path\r\n \"Choices are: %s\" % (name, \", \".join(available)))\r\ndjango.core.exceptions.FieldError: Cannot resolve keyword 'customer' into field. Choices are: date_joined, djstripe_customers, email, first_name, groups, id, is_active, is_staff, is_superuser, last_login, last_name, logentry, password, user_permissions, username\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: djstripe.management.commands.djstripe_init_customers.\n\n :synopsis: dj-stripe - init_customers command.\n\n.. moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.core.management.base import BaseCommand\n\nfrom ...models import Customer\nfrom ...settings import get_subscriber_model\n\n\nclass Command(BaseCommand):\n \"\"\"Create customer objects for existing subscribers that don't have one.\"\"\"\n\n help = \"Create customer objects for existing subscribers that don't have one\"\n\n def handle(self, *args, **options):\n \"\"\"Create Customer objects for Subscribers without Customer objects associated.\"\"\"\n for subscriber in get_subscriber_model().objects.filter(customer__isnull=True):\n # use get_or_create in case of race conditions on large subscriber bases\n Customer.get_or_create(subscriber=subscriber)\n print(\"Created subscriber for {0}\".format(subscriber.email))\n", "path": "djstripe/management/commands/djstripe_init_customers.py"}]}
1700
183
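The golden diff in the record above works because Django exposes the reverse side of a foreign key under that key's `related_name`, and filtering the reverse relation against `None` selects rows with no related objects. A minimal sketch of the pattern under stated assumptions — the `related_name` mirrors dj-stripe's, while the `Subscriber`/`Customer` models are hypothetical stand-ins:

```python
from django.db import models


class Subscriber(models.Model):
    email = models.EmailField()


class Customer(models.Model):
    # related_name sets the reverse-lookup keyword available on Subscriber;
    # this is why the fixed command filters on "djstripe_customers",
    # not "customer".
    subscriber = models.ForeignKey(
        Subscriber,
        on_delete=models.CASCADE,
        related_name="djstripe_customers",
    )


# Subscribers with no Customer rows pointing at them; equivalent to
# Subscriber.objects.filter(djstripe_customers__isnull=True).
orphans = Subscriber.objects.filter(djstripe_customers=None)
```

The `FieldError` in the traceback lists the valid lookup names for the subscriber model, and `djstripe_customers` appears among them while `customer` does not — exactly the mismatch the patch corrects.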
gh_patches_debug_48141
rasdani/github-patches
git_diff
google__flax-270
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `typing._ClassVar` cannot be accessed in the iPython shell – dataclasses package seems to mess up on Python 3.7 ### Problem you have encountered: I just installed flax and tried to import it from the iPython shell. But it raises an `AttributeError`. ``` In [1]: import flax *snip* ~/.virtualenvs/flax2/lib/python3.7/site-packages/dataclasses.py in _is_classvar(a_type, typing) 548 # This test uses a typing internal class, but it's the best way to 549 # test if this is a ClassVar. --> 550 return type(a_type) is typing._ClassVar 551 552 AttributeError: module 'typing' has no attribute '_ClassVar' ``` This does not happen in the normal interpreter, where everything goes fine. ### What you expected to happen: I expected the import to work the same in iPython and the normal python shell. ### Logs, error messages, etc: Full traceback in this gist: https://gist.github.com/bayerj/96f096c7fb09a7c9b758dabdbca32671 ### Steps to reproduce: On Mac OS X with Python 3.7.6, not anaconda, virtuelenvwrapper installed. ``` ❯❯❯ mkvirtualenv flax2 ❯❯❯ pip install jaxlib *snip* ❯❯❯ pip install flax *snip* ❯❯❯ ipython *snip* In [1]: import flax ``` ### Workaround The problem seems to be in the `dataclasses` package–not python's own one–from PyPI. If I uninstall it... ``` ❯❯❯ pip uninstall dataclasses Found existing installation: dataclasses 0.6 Uninstalling dataclasses-0.6: Would remove: /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses-0.6.dist-info/* /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses.py Proceed (y/n)? y Successfully uninstalled dataclasses-0.6 ❯❯❯ ipython /usr/local/lib/python3.7/site-packages/IPython/core/interactiveshell.py:931: UserWarning: Attempting to work in a virtualenv. If you encounter problems, please install IPython inside the virtualenv. warn("Attempting to work in a virtualenv. If you encounter problems, please " Python 3.7.6 (default, Dec 30 2019, 19:38:28) Type 'copyright', 'credits' or 'license' for more information IPython 7.9.0 -- An enhanced Interactive Python. Type '?' for help. In [1]: import flax ``` ... this goes fine. </issue> <code> [start of setup.py] 1 # Copyright 2020 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """setup.py for Flax.""" 16 17 import os 18 from setuptools import find_packages 19 from setuptools import setup 20 21 version = "0.1.0" 22 23 here = os.path.abspath(os.path.dirname(__file__)) 24 try: 25 README = open(os.path.join(here, "README.md"), encoding='utf-8').read() 26 except IOError: 27 README = "" 28 29 install_requires = [ 30 "numpy>=1.12", 31 "jax>=0.1.59", 32 "matplotlib", # only needed for tensorboard export 33 "dataclasses", # will only install on py3.6 34 "msgpack", 35 ] 36 37 tests_require = [ 38 "jaxlib", 39 "pytest", 40 "pytest-cov", 41 "pytest-xdist", 42 "tensorflow", 43 "tensorflow_datasets", 44 ] 45 46 setup( 47 name="flax", 48 version=version, 49 description="Flax: A neural network library for JAX designed for flexibility", 50 long_description="\n\n".join([README]), 51 long_description_content_type='text/markdown', 52 classifiers=[ 53 "Development Status :: 3 - Alpha", 54 "Intended Audience :: Developers", 55 "Intended Audience :: Science/Research", 56 "License :: OSI Approved :: MIT License", 57 "Programming Language :: Python :: 3.7", 58 "Topic :: Scientific/Engineering :: Artificial Intelligence", 59 ], 60 keywords="", 61 author="Flax team", 62 author_email="[email protected]", 63 url="https://github.com/google/flax", 64 license="Apache", 65 packages=find_packages(), 66 include_package_data=False, 67 zip_safe=False, 68 install_requires=install_requires, 69 extras_require={ 70 "testing": tests_require, 71 }, 72 ) 73 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ "numpy>=1.12", "jax>=0.1.59", "matplotlib", # only needed for tensorboard export - "dataclasses", # will only install on py3.6 + "dataclasses;python_version<'3.7'", # will only install on py3.6 "msgpack", ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n- \"dataclasses\", # will only install on py3.6\n+ \"dataclasses;python_version<'3.7'\", # will only install on py3.6\n \"msgpack\",\n ]\n", "issue": "`typing._ClassVar` cannot be accessed in the iPython shell \u2013 dataclasses package seems to mess up on Python 3.7\n### Problem you have encountered:\r\n\r\nI just installed flax and tried to import it from the iPython shell. But it raises an `AttributeError`.\r\n\r\n```\r\nIn [1]: import flax\r\n\r\n*snip*\r\n\r\n~/.virtualenvs/flax2/lib/python3.7/site-packages/dataclasses.py in _is_classvar(a_type, typing)\r\n 548 # This test uses a typing internal class, but it's the best way to\r\n 549 # test if this is a ClassVar.\r\n--> 550 return type(a_type) is typing._ClassVar\r\n 551\r\n 552\r\n\r\nAttributeError: module 'typing' has no attribute '_ClassVar'\r\n```\r\nThis does not happen in the normal interpreter, where everything goes fine. \r\n\r\n### What you expected to happen:\r\n\r\nI expected the import to work the same in iPython and the normal python shell.\r\n\r\n### Logs, error messages, etc:\r\n\r\nFull traceback in this gist: https://gist.github.com/bayerj/96f096c7fb09a7c9b758dabdbca32671\r\n\r\n### Steps to reproduce:\r\n\r\nOn Mac OS X with Python 3.7.6, not anaconda, virtuelenvwrapper installed.\r\n\r\n```\r\n\u276f\u276f\u276f mkvirtualenv flax2\r\n\u276f\u276f\u276f pip install jaxlib\r\n*snip*\r\n\u276f\u276f\u276f pip install flax\r\n*snip*\r\n\u276f\u276f\u276f ipython\r\n\r\n*snip*\r\n\r\nIn [1]: import flax\r\n```\r\n\r\n### Workaround\r\n\r\nThe problem seems to be in the `dataclasses` package\u2013not python's own one\u2013from PyPI. If I uninstall it...\r\n\r\n```\r\n\u276f\u276f\u276f pip uninstall dataclasses\r\nFound existing installation: dataclasses 0.6\r\nUninstalling dataclasses-0.6:\r\n Would remove:\r\n /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses-0.6.dist-info/*\r\n /Users/bayerj/.virtualenvs/debug2/lib/python3.7/site-packages/dataclasses.py\r\nProceed (y/n)? y\r\n Successfully uninstalled dataclasses-0.6\r\n\u276f\u276f\u276f ipython\r\n/usr/local/lib/python3.7/site-packages/IPython/core/interactiveshell.py:931: UserWarning: Attempting to work in a virtualenv. If you encounter problems, please install IPython inside the virtualenv.\r\n warn(\"Attempting to work in a virtualenv. If you encounter problems, please \"\r\nPython 3.7.6 (default, Dec 30 2019, 19:38:28)\r\nType 'copyright', 'credits' or 'license' for more information\r\nIPython 7.9.0 -- An enhanced Interactive Python. Type '?' for help.\r\n\r\nIn [1]: import flax\r\n```\r\n... 
this goes fine.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"setup.py for Flax.\"\"\"\n\nimport os\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nversion = \"0.1.0\"\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, \"README.md\"), encoding='utf-8').read()\nexcept IOError:\n README = \"\"\n\ninstall_requires = [\n \"numpy>=1.12\",\n \"jax>=0.1.59\",\n \"matplotlib\", # only needed for tensorboard export\n \"dataclasses\", # will only install on py3.6\n \"msgpack\",\n]\n\ntests_require = [\n \"jaxlib\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-xdist\",\n \"tensorflow\",\n \"tensorflow_datasets\",\n]\n\nsetup(\n name=\"flax\",\n version=version,\n description=\"Flax: A neural network library for JAX designed for flexibility\",\n long_description=\"\\n\\n\".join([README]),\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n keywords=\"\",\n author=\"Flax team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/google/flax\",\n license=\"Apache\",\n packages=find_packages(),\n include_package_data=False,\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\n \"testing\": tests_require,\n },\n )\n", "path": "setup.py"}]}
1820
112
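The flax patch above hinges on a PEP 508 environment marker: the `;python_version<'3.7'` suffix makes pip evaluate the condition at install time and skip the `dataclasses` backport on interpreters that already ship the module, so the backport can no longer shadow the stdlib and touch the removed `typing._ClassVar`. A minimal sketch of the idiom in a hypothetical `setup.py` — only the marker syntax is the point; the package name and versions are illustrative:

```python
from setuptools import setup

setup(
    name="example",
    version="0.1.0",
    install_requires=[
        # Unconditional requirement, installed everywhere.
        "numpy>=1.12",
        # PEP 508 marker: pip skips this line on Python >= 3.7, where
        # dataclasses is part of the standard library.
        "dataclasses; python_version < '3.7'",
    ],
)
```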